@@ -284,72 +284,6 @@ int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
 }
 EXPORT_SYMBOL_GPL(queue_limits_set);
 
-/**
- * blk_queue_bounce_limit - set bounce buffer limit for queue
- * @q: the request queue for the device
- * @bounce: bounce limit to enforce
- *
- * Description:
- * Force bouncing for ISA DMA ranges or highmem.
- *
- * DEPRECATED, don't use in new code.
- **/
-void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
-{
-	q->limits.bounce = bounce;
-}
-EXPORT_SYMBOL(blk_queue_bounce_limit);
-
-/**
- * blk_queue_max_hw_sectors - set max sectors for a request for this queue
- * @q: the request queue for the device
- * @max_hw_sectors: max hardware sectors in the usual 512b unit
- *
- * Description:
- * Enables a low level driver to set a hard upper limit,
- * max_hw_sectors, on the size of requests. max_hw_sectors is set by
- * the device driver based upon the capabilities of the I/O
- * controller.
- *
- * max_dev_sectors is a hard limit imposed by the storage device for
- * READ/WRITE requests. It is set by the disk driver.
- *
- * max_sectors is a soft limit imposed by the block layer for
- * filesystem type requests. This value can be overridden on a
- * per-device basis in /sys/block/<device>/queue/max_sectors_kb.
- * The soft limit can not exceed max_hw_sectors.
- **/
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
-{
-	struct queue_limits *limits = &q->limits;
-	unsigned int max_sectors;
-
-	if ((max_hw_sectors << 9) < PAGE_SIZE) {
-		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
-		pr_info("%s: set to minimum %u\n", __func__, max_hw_sectors);
-	}
-
-	max_hw_sectors = round_down(max_hw_sectors,
-			limits->logical_block_size >> SECTOR_SHIFT);
-	limits->max_hw_sectors = max_hw_sectors;
-
-	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
-
-	if (limits->max_user_sectors)
-		max_sectors = min(max_sectors, limits->max_user_sectors);
-	else
-		max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS_CAP);
-
-	max_sectors = round_down(max_sectors,
-			limits->logical_block_size >> SECTOR_SHIFT);
-	limits->max_sectors = max_sectors;
-
-	if (!q->disk)
-		return;
-	q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
-}
-EXPORT_SYMBOL(blk_queue_max_hw_sectors);
-
 /**
  * blk_queue_chunk_sectors - set size of the chunk for this queue
  * @q: the request queue for the device
@@ -436,65 +370,6 @@ void blk_queue_max_zone_append_sectors(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
 
-/**
- * blk_queue_max_segments - set max hw segments for a request for this queue
- * @q: the request queue for the device
- * @max_segments: max number of segments
- *
- * Description:
- * Enables a low level driver to set an upper limit on the number of
- * hw data segments in a request.
- **/
-void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
-{
-	if (!max_segments) {
-		max_segments = 1;
-		pr_info("%s: set to minimum %u\n", __func__, max_segments);
-	}
-
-	q->limits.max_segments = max_segments;
-}
-EXPORT_SYMBOL(blk_queue_max_segments);
-
-/**
- * blk_queue_max_discard_segments - set max segments for discard requests
- * @q: the request queue for the device
- * @max_segments: max number of segments
- *
- * Description:
- * Enables a low level driver to set an upper limit on the number of
- * segments in a discard request.
- **/
-void blk_queue_max_discard_segments(struct request_queue *q,
-		unsigned short max_segments)
-{
-	q->limits.max_discard_segments = max_segments;
-}
-EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);
-
-/**
- * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
- * @q: the request queue for the device
- * @max_size: max size of segment in bytes
- *
- * Description:
- * Enables a low level driver to set an upper limit on the size of a
- * coalesced segment
- **/
-void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
-{
-	if (max_size < PAGE_SIZE) {
-		max_size = PAGE_SIZE;
-		pr_info("%s: set to minimum %u\n", __func__, max_size);
-	}
-
-	/* see blk_queue_virt_boundary() for the explanation */
-	WARN_ON_ONCE(q->limits.virt_boundary_mask);
-
-	q->limits.max_segment_size = max_size;
-}
-EXPORT_SYMBOL(blk_queue_max_segment_size);
-
 /**
  * blk_queue_logical_block_size - set logical block size for the queue
  * @q: the request queue for the device
@@ -661,29 +536,6 @@ void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
 }
 EXPORT_SYMBOL(blk_limits_io_opt);
 
-/**
- * blk_queue_io_opt - set optimal request size for the queue
- * @q: the request queue for the device
- * @opt: optimal request size in bytes
- *
- * Description:
- * Storage devices may report an optimal I/O size, which is the
- * device's preferred unit for sustained I/O. This is rarely reported
- * for disk drives. For RAID arrays it is usually the stripe width or
- * the internal track size. A properly aligned multiple of
- * optimal_io_size is the preferred request size for workloads where
- * sustained throughput is desired.
- */
-void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
-{
-	blk_limits_io_opt(&q->limits, opt);
-	if (!q->disk)
-		return;
-	q->disk->bdi->ra_pages =
-		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
-}
-EXPORT_SYMBOL(blk_queue_io_opt);
-
 static int queue_limit_alignment_offset(const struct queue_limits *lim,
 		sector_t sector)
 {
@@ -933,81 +785,6 @@ void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
 }
 EXPORT_SYMBOL(blk_queue_update_dma_pad);
 
-/**
- * blk_queue_segment_boundary - set boundary rules for segment merging
- * @q: the request queue for the device
- * @mask: the memory boundary mask
- **/
-void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
-{
-	if (mask < PAGE_SIZE - 1) {
-		mask = PAGE_SIZE - 1;
-		pr_info("%s: set to minimum %lx\n", __func__, mask);
-	}
-
-	q->limits.seg_boundary_mask = mask;
-}
-EXPORT_SYMBOL(blk_queue_segment_boundary);
-
-/**
- * blk_queue_virt_boundary - set boundary rules for bio merging
- * @q: the request queue for the device
- * @mask: the memory boundary mask
- **/
-void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
-{
-	q->limits.virt_boundary_mask = mask;
-
-	/*
-	 * Devices that require a virtual boundary do not support scatter/gather
-	 * I/O natively, but instead require a descriptor list entry for each
-	 * page (which might not be identical to the Linux PAGE_SIZE). Because
-	 * of that they are not limited by our notion of "segment size".
-	 */
-	if (mask)
-		q->limits.max_segment_size = UINT_MAX;
-}
-EXPORT_SYMBOL(blk_queue_virt_boundary);
-
-/**
- * blk_queue_dma_alignment - set dma length and memory alignment
- * @q: the request queue for the device
- * @mask: alignment mask
- *
- * description:
- * set required memory and length alignment for direct dma transactions.
- * this is used when building direct io requests for the queue.
- *
- **/
-void blk_queue_dma_alignment(struct request_queue *q, int mask)
-{
-	q->limits.dma_alignment = mask;
-}
-EXPORT_SYMBOL(blk_queue_dma_alignment);
-
-/**
- * blk_queue_update_dma_alignment - update dma length and memory alignment
- * @q: the request queue for the device
- * @mask: alignment mask
- *
- * description:
- * update required memory and length alignment for direct dma transactions.
- * If the requested alignment is larger than the current alignment, then
- * the current queue alignment is updated to the new value, otherwise it
- * is left alone. The design of this is to allow multiple objects
- * (driver, device, transport etc) to set their respective
- * alignments without having them interfere.
- *
- **/
-void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
-{
-	BUG_ON(mask > PAGE_SIZE);
-
-	if (mask > q->limits.dma_alignment)
-		q->limits.dma_alignment = mask;
-}
-EXPORT_SYMBOL(blk_queue_update_dma_alignment);
-
 /**
  * blk_set_queue_depth - tell the block layer about the device queue depth
  * @q: the request queue for the device
@@ -1061,28 +838,6 @@ void blk_queue_required_elevator_features(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);
 
-/**
- * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
- * @q: the request queue for the device
- * @dev: the device pointer for dma
- *
- * Tell the block layer about merging the segments by dma map of @q.
- */
-bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
-		struct device *dev)
-{
-	unsigned long boundary = dma_get_merge_boundary(dev);
-
-	if (!boundary)
-		return false;
-
-	/* No need to update max_segment_size. see blk_queue_virt_boundary() */
-	blk_queue_virt_boundary(q, boundary);
-
-	return true;
-}
-EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
-
 /**
  * disk_set_zoned - indicate a zoned device
  * @disk: gendisk to configure
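
Every helper deleted above writes a single field of q->limits, while queue_limits_set(), which this diff keeps exported at the top of the first hunk, takes a whole struct queue_limits and so looks like the replacement path on the driver side. The fragment below is a rough, non-authoritative sketch of that shape only: example_driver_set_limits(), the literal values, and the idea of seeding the structure from q->limits are assumptions made for illustration, not code from this commit.

/*
 * Hypothetical driver-side sketch, not taken from this commit: fill the
 * same q->limits fields the removed setters used to assign, then apply
 * them with one call to queue_limits_set() instead of one call per field.
 */
static int example_driver_set_limits(struct request_queue *q)
{
	struct queue_limits lim = q->limits;	/* keep fields we do not touch */

	lim.max_hw_sectors = 1024;	/* placeholder, in 512-byte sectors */
	lim.max_segments = 128;		/* placeholder segment count */
	lim.max_segment_size = 65536;	/* placeholder, in bytes */
	lim.dma_alignment = 3;		/* placeholder alignment mask */

	return queue_limits_set(q, &lim);
}

Each assignment corresponds to one of the removed setters (blk_queue_max_hw_sectors(), blk_queue_max_segments(), blk_queue_max_segment_size(), blk_queue_dma_alignment()); the values are placeholders.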