Skip to content

Commit 35fab92

Browse files
committed
Merge tag 'for-linus-6.4-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen updates from Juergen Gross:

 - some cleanups in the Xen blkback driver

 - fix potential sleeps under lock in various Xen drivers

* tag 'for-linus-6.4-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/blkback: move blkif_get_x86_*_req() into blkback.c
  xen/blkback: simplify free_persistent_gnts() interface
  xen/blkback: remove stale prototype
  xen/blkback: fix white space code style issues
  xen/pvcalls: don't call bind_evtchn_to_irqhandler() under lock
  xen/scsiback: don't call scsiback_free_translation_entry() under lock
  xen/pciback: don't call pcistub_device_put() under lock
2 parents da46b58 + cbfac77 commit 35fab92

5 files changed

Lines changed: 160 additions & 148 deletions

File tree

drivers/block/xen-blkback/blkback.c

Lines changed: 115 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -239,16 +239,19 @@ static void put_persistent_gnt(struct xen_blkif_ring *ring,
239239
atomic_dec(&ring->persistent_gnt_in_use);
240240
}
241241

242-
static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *root,
243-
unsigned int num)
242+
static void free_persistent_gnts(struct xen_blkif_ring *ring)
244243
{
244+
struct rb_root *root = &ring->persistent_gnts;
245245
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
246246
struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
247247
struct persistent_gnt *persistent_gnt;
248248
struct rb_node *n;
249249
int segs_to_unmap = 0;
250250
struct gntab_unmap_queue_data unmap_data;
251251

252+
if (RB_EMPTY_ROOT(root))
253+
return;
254+
252255
unmap_data.pages = pages;
253256
unmap_data.unmap_ops = unmap;
254257
unmap_data.kunmap_ops = NULL;
@@ -277,9 +280,11 @@ static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *ro
277280

278281
rb_erase(&persistent_gnt->node, root);
279282
kfree(persistent_gnt);
280-
num--;
283+
ring->persistent_gnt_c--;
281284
}
282-
BUG_ON(num != 0);
285+
286+
BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
287+
BUG_ON(ring->persistent_gnt_c != 0);
283288
}
284289

285290
void xen_blkbk_unmap_purged_grants(struct work_struct *work)
@@ -631,12 +636,7 @@ int xen_blkif_schedule(void *arg)
631636
void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
632637
{
633638
/* Free all persistent grant pages */
634-
if (!RB_EMPTY_ROOT(&ring->persistent_gnts))
635-
free_persistent_gnts(ring, &ring->persistent_gnts,
636-
ring->persistent_gnt_c);
637-
638-
BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
639-
ring->persistent_gnt_c = 0;
639+
free_persistent_gnts(ring);
640640

641641
/* Since we are shutting down remove all pages from the buffer */
642642
gnttab_page_cache_shrink(&ring->free_pages, 0 /* All */);
@@ -891,7 +891,7 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
891891
out:
892892
for (i = last_map; i < num; i++) {
893893
/* Don't zap current batch's valid persistent grants. */
894-
if(i >= map_until)
894+
if (i >= map_until)
895895
pages[i]->persistent_gnt = NULL;
896896
pages[i]->handle = BLKBACK_INVALID_HANDLE;
897897
}
@@ -1072,7 +1072,111 @@ static void end_block_io_op(struct bio *bio)
10721072
bio_put(bio);
10731073
}
10741074

1075+
static void blkif_get_x86_32_req(struct blkif_request *dst,
1076+
const struct blkif_x86_32_request *src)
1077+
{
1078+
unsigned int i, n;
1079+
1080+
dst->operation = READ_ONCE(src->operation);
1081+
1082+
switch (dst->operation) {
1083+
case BLKIF_OP_READ:
1084+
case BLKIF_OP_WRITE:
1085+
case BLKIF_OP_WRITE_BARRIER:
1086+
case BLKIF_OP_FLUSH_DISKCACHE:
1087+
dst->u.rw.nr_segments = READ_ONCE(src->u.rw.nr_segments);
1088+
dst->u.rw.handle = src->u.rw.handle;
1089+
dst->u.rw.id = src->u.rw.id;
1090+
dst->u.rw.sector_number = src->u.rw.sector_number;
1091+
n = min_t(unsigned int, BLKIF_MAX_SEGMENTS_PER_REQUEST,
1092+
dst->u.rw.nr_segments);
1093+
for (i = 0; i < n; i++)
1094+
dst->u.rw.seg[i] = src->u.rw.seg[i];
1095+
break;
1096+
1097+
case BLKIF_OP_DISCARD:
1098+
dst->u.discard.flag = src->u.discard.flag;
1099+
dst->u.discard.id = src->u.discard.id;
1100+
dst->u.discard.sector_number = src->u.discard.sector_number;
1101+
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
1102+
break;
1103+
1104+
case BLKIF_OP_INDIRECT:
1105+
dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
1106+
dst->u.indirect.nr_segments =
1107+
READ_ONCE(src->u.indirect.nr_segments);
1108+
dst->u.indirect.handle = src->u.indirect.handle;
1109+
dst->u.indirect.id = src->u.indirect.id;
1110+
dst->u.indirect.sector_number = src->u.indirect.sector_number;
1111+
n = min(MAX_INDIRECT_PAGES,
1112+
INDIRECT_PAGES(dst->u.indirect.nr_segments));
1113+
for (i = 0; i < n; i++)
1114+
dst->u.indirect.indirect_grefs[i] =
1115+
src->u.indirect.indirect_grefs[i];
1116+
break;
1117+
1118+
default:
1119+
/*
1120+
* Don't know how to translate this op. Only get the
1121+
* ID so failure can be reported to the frontend.
1122+
*/
1123+
dst->u.other.id = src->u.other.id;
1124+
break;
1125+
}
1126+
}
1127+
1128+
static void blkif_get_x86_64_req(struct blkif_request *dst,
1129+
const struct blkif_x86_64_request *src)
1130+
{
1131+
unsigned int i, n;
1132+
1133+
dst->operation = READ_ONCE(src->operation);
1134+
1135+
switch (dst->operation) {
1136+
case BLKIF_OP_READ:
1137+
case BLKIF_OP_WRITE:
1138+
case BLKIF_OP_WRITE_BARRIER:
1139+
case BLKIF_OP_FLUSH_DISKCACHE:
1140+
dst->u.rw.nr_segments = READ_ONCE(src->u.rw.nr_segments);
1141+
dst->u.rw.handle = src->u.rw.handle;
1142+
dst->u.rw.id = src->u.rw.id;
1143+
dst->u.rw.sector_number = src->u.rw.sector_number;
1144+
n = min_t(unsigned int, BLKIF_MAX_SEGMENTS_PER_REQUEST,
1145+
dst->u.rw.nr_segments);
1146+
for (i = 0; i < n; i++)
1147+
dst->u.rw.seg[i] = src->u.rw.seg[i];
1148+
break;
1149+
1150+
case BLKIF_OP_DISCARD:
1151+
dst->u.discard.flag = src->u.discard.flag;
1152+
dst->u.discard.id = src->u.discard.id;
1153+
dst->u.discard.sector_number = src->u.discard.sector_number;
1154+
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
1155+
break;
1156+
1157+
case BLKIF_OP_INDIRECT:
1158+
dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
1159+
dst->u.indirect.nr_segments =
1160+
READ_ONCE(src->u.indirect.nr_segments);
1161+
dst->u.indirect.handle = src->u.indirect.handle;
1162+
dst->u.indirect.id = src->u.indirect.id;
1163+
dst->u.indirect.sector_number = src->u.indirect.sector_number;
1164+
n = min(MAX_INDIRECT_PAGES,
1165+
INDIRECT_PAGES(dst->u.indirect.nr_segments));
1166+
for (i = 0; i < n; i++)
1167+
dst->u.indirect.indirect_grefs[i] =
1168+
src->u.indirect.indirect_grefs[i];
1169+
break;
10751170

1171+
default:
1172+
/*
1173+
* Don't know how to translate this op. Only get the
1174+
* ID so failure can be reported to the frontend.
1175+
*/
1176+
dst->u.other.id = src->u.other.id;
1177+
break;
1178+
}
1179+
}
10761180

10771181
/*
10781182
* Function to copy the from the ring buffer the 'struct blkif_request'

drivers/block/xen-blkback/common.h

Lines changed: 3 additions & 100 deletions
Original file line numberDiff line numberDiff line change
@@ -296,7 +296,7 @@ struct xen_blkif_ring {
296296
struct work_struct free_work;
297297
/* Thread shutdown wait queue. */
298298
wait_queue_head_t shutdown_wq;
299-
struct xen_blkif *blkif;
299+
struct xen_blkif *blkif;
300300
};
301301

302302
struct xen_blkif {
@@ -315,7 +315,7 @@ struct xen_blkif {
315315
atomic_t drain;
316316

317317
struct work_struct free_work;
318-
unsigned int nr_ring_pages;
318+
unsigned int nr_ring_pages;
319319
bool multi_ref;
320320
/* All rings for this device. */
321321
struct xen_blkif_ring *rings;
@@ -329,7 +329,7 @@ struct seg_buf {
329329
};
330330

331331
struct grant_page {
332-
struct page *page;
332+
struct page *page;
333333
struct persistent_gnt *persistent_gnt;
334334
grant_handle_t handle;
335335
grant_ref_t gref;
@@ -384,7 +384,6 @@ void xen_blkif_xenbus_fini(void);
384384

385385
irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
386386
int xen_blkif_schedule(void *arg);
387-
int xen_blkif_purge_persistent(void *arg);
388387
void xen_blkbk_free_caches(struct xen_blkif_ring *ring);
389388

390389
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
@@ -395,100 +394,4 @@ int xen_blkbk_barrier(struct xenbus_transaction xbt,
395394
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
396395
void xen_blkbk_unmap_purged_grants(struct work_struct *work);
397396

398-
static inline void blkif_get_x86_32_req(struct blkif_request *dst,
399-
struct blkif_x86_32_request *src)
400-
{
401-
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
402-
dst->operation = READ_ONCE(src->operation);
403-
switch (dst->operation) {
404-
case BLKIF_OP_READ:
405-
case BLKIF_OP_WRITE:
406-
case BLKIF_OP_WRITE_BARRIER:
407-
case BLKIF_OP_FLUSH_DISKCACHE:
408-
dst->u.rw.nr_segments = src->u.rw.nr_segments;
409-
dst->u.rw.handle = src->u.rw.handle;
410-
dst->u.rw.id = src->u.rw.id;
411-
dst->u.rw.sector_number = src->u.rw.sector_number;
412-
barrier();
413-
if (n > dst->u.rw.nr_segments)
414-
n = dst->u.rw.nr_segments;
415-
for (i = 0; i < n; i++)
416-
dst->u.rw.seg[i] = src->u.rw.seg[i];
417-
break;
418-
case BLKIF_OP_DISCARD:
419-
dst->u.discard.flag = src->u.discard.flag;
420-
dst->u.discard.id = src->u.discard.id;
421-
dst->u.discard.sector_number = src->u.discard.sector_number;
422-
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
423-
break;
424-
case BLKIF_OP_INDIRECT:
425-
dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
426-
dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
427-
dst->u.indirect.handle = src->u.indirect.handle;
428-
dst->u.indirect.id = src->u.indirect.id;
429-
dst->u.indirect.sector_number = src->u.indirect.sector_number;
430-
barrier();
431-
j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
432-
for (i = 0; i < j; i++)
433-
dst->u.indirect.indirect_grefs[i] =
434-
src->u.indirect.indirect_grefs[i];
435-
break;
436-
default:
437-
/*
438-
* Don't know how to translate this op. Only get the
439-
* ID so failure can be reported to the frontend.
440-
*/
441-
dst->u.other.id = src->u.other.id;
442-
break;
443-
}
444-
}
445-
446-
static inline void blkif_get_x86_64_req(struct blkif_request *dst,
447-
struct blkif_x86_64_request *src)
448-
{
449-
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
450-
dst->operation = READ_ONCE(src->operation);
451-
switch (dst->operation) {
452-
case BLKIF_OP_READ:
453-
case BLKIF_OP_WRITE:
454-
case BLKIF_OP_WRITE_BARRIER:
455-
case BLKIF_OP_FLUSH_DISKCACHE:
456-
dst->u.rw.nr_segments = src->u.rw.nr_segments;
457-
dst->u.rw.handle = src->u.rw.handle;
458-
dst->u.rw.id = src->u.rw.id;
459-
dst->u.rw.sector_number = src->u.rw.sector_number;
460-
barrier();
461-
if (n > dst->u.rw.nr_segments)
462-
n = dst->u.rw.nr_segments;
463-
for (i = 0; i < n; i++)
464-
dst->u.rw.seg[i] = src->u.rw.seg[i];
465-
break;
466-
case BLKIF_OP_DISCARD:
467-
dst->u.discard.flag = src->u.discard.flag;
468-
dst->u.discard.id = src->u.discard.id;
469-
dst->u.discard.sector_number = src->u.discard.sector_number;
470-
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
471-
break;
472-
case BLKIF_OP_INDIRECT:
473-
dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
474-
dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
475-
dst->u.indirect.handle = src->u.indirect.handle;
476-
dst->u.indirect.id = src->u.indirect.id;
477-
dst->u.indirect.sector_number = src->u.indirect.sector_number;
478-
barrier();
479-
j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
480-
for (i = 0; i < j; i++)
481-
dst->u.indirect.indirect_grefs[i] =
482-
src->u.indirect.indirect_grefs[i];
483-
break;
484-
default:
485-
/*
486-
* Don't know how to translate this op. Only get the
487-
* ID so failure can be reported to the frontend.
488-
*/
489-
dst->u.other.id = src->u.other.id;
490-
break;
491-
}
492-
}
493-
494397
#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */

0 commit comments

Comments
 (0)