4141 * @buf_size: size of one rx or tx buffer
4242 * @last_sbuf: index of last tx buffer used
4343 * @bufs_dma: dma base addr of the buffers
44- * @tx_lock: protects svq, sbufs and sleepers, to allow concurrent senders.
44+ * @tx_lock: protects svq and sbufs, to allow concurrent senders.
4545 * sending a message might require waking up a dozing remote
4646 * processor, which involves sleeping, hence the mutex.
4747 * @endpoints: idr of local endpoints, allows fast retrieval
4848 * @endpoints_lock: lock of the endpoints set
4949 * @sendq: wait queue of sending contexts waiting for a tx buffers
50- * @sleepers: number of senders that are waiting for a tx buffer
5150 *
5251 * This structure stores the rpmsg state of a given virtio remote processor
5352 * device (there might be several virtio proc devices for each physical
@@ -65,7 +64,6 @@ struct virtproc_info {
6564 struct idr endpoints ;
6665 struct mutex endpoints_lock ;
6766 wait_queue_head_t sendq ;
68- atomic_t sleepers ;
6967};
7068
7169/* The feature bitmap for virtio rpmsg */
@@ -144,6 +142,8 @@ static int virtio_rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len,
144142static int virtio_rpmsg_trysend (struct rpmsg_endpoint * ept , void * data , int len );
145143static int virtio_rpmsg_trysendto (struct rpmsg_endpoint * ept , void * data ,
146144 int len , u32 dst );
145+ static __poll_t virtio_rpmsg_poll (struct rpmsg_endpoint * ept , struct file * filp ,
146+ poll_table * wait );
147147static ssize_t virtio_rpmsg_get_mtu (struct rpmsg_endpoint * ept );
148148static struct rpmsg_device * __rpmsg_create_channel (struct virtproc_info * vrp ,
149149 struct rpmsg_channel_info * chinfo );
@@ -154,6 +154,7 @@ static const struct rpmsg_endpoint_ops virtio_endpoint_ops = {
154154 .sendto = virtio_rpmsg_sendto ,
155155 .trysend = virtio_rpmsg_trysend ,
156156 .trysendto = virtio_rpmsg_trysendto ,
157+ .poll = virtio_rpmsg_poll ,
157158 .get_mtu = virtio_rpmsg_get_mtu ,
158159};
159160
@@ -436,7 +437,6 @@ static void *get_a_tx_buf(struct virtproc_info *vrp)
436437 unsigned int len ;
437438 void * ret ;
438439
439- /* support multiple concurrent senders */
440440 mutex_lock (& vrp -> tx_lock );
441441
442442 /*
@@ -454,62 +454,6 @@ static void *get_a_tx_buf(struct virtproc_info *vrp)
454454 return ret ;
455455}
456456
457- /**
458- * rpmsg_upref_sleepers() - enable "tx-complete" interrupts, if needed
459- * @vrp: virtual remote processor state
460- *
461- * This function is called before a sender is blocked, waiting for
462- * a tx buffer to become available.
463- *
464- * If we already have blocking senders, this function merely increases
465- * the "sleepers" reference count, and exits.
466- *
467- * Otherwise, if this is the first sender to block, we also enable
468- * virtio's tx callbacks, so we'd be immediately notified when a tx
469- * buffer is consumed (we rely on virtio's tx callback in order
470- * to wake up sleeping senders as soon as a tx buffer is used by the
471- * remote processor).
472- */
473- static void rpmsg_upref_sleepers (struct virtproc_info * vrp )
474- {
475- /* support multiple concurrent senders */
476- mutex_lock (& vrp -> tx_lock );
477-
478- /* are we the first sleeping context waiting for tx buffers ? */
479- if (atomic_inc_return (& vrp -> sleepers ) == 1 )
480- /* enable "tx-complete" interrupts before dozing off */
481- virtqueue_enable_cb (vrp -> svq );
482-
483- mutex_unlock (& vrp -> tx_lock );
484- }
485-
486- /**
487- * rpmsg_downref_sleepers() - disable "tx-complete" interrupts, if needed
488- * @vrp: virtual remote processor state
489- *
490- * This function is called after a sender, that waited for a tx buffer
491- * to become available, is unblocked.
492- *
493- * If we still have blocking senders, this function merely decreases
494- * the "sleepers" reference count, and exits.
495- *
496- * Otherwise, if there are no more blocking senders, we also disable
497- * virtio's tx callbacks, to avoid the overhead incurred with handling
498- * those (now redundant) interrupts.
499- */
500- static void rpmsg_downref_sleepers (struct virtproc_info * vrp )
501- {
502- /* support multiple concurrent senders */
503- mutex_lock (& vrp -> tx_lock );
504-
505- /* are we the last sleeping context waiting for tx buffers ? */
506- if (atomic_dec_and_test (& vrp -> sleepers ))
507- /* disable "tx-complete" interrupts */
508- virtqueue_disable_cb (vrp -> svq );
509-
510- mutex_unlock (& vrp -> tx_lock );
511- }
512-
513457/**
514458 * rpmsg_send_offchannel_raw() - send a message across to the remote processor
515459 * @rpdev: the rpmsg channel
@@ -582,9 +526,6 @@ static int rpmsg_send_offchannel_raw(struct rpmsg_device *rpdev,
582526
583527 /* no free buffer ? wait for one (but bail after 15 seconds) */
584528 while (!msg ) {
585- /* enable "tx-complete" interrupts, if not already enabled */
586- rpmsg_upref_sleepers (vrp );
587-
588529 /*
589530 * sleep until a free buffer is available or 15 secs elapse.
590531 * the timeout period is not configurable because there's
@@ -595,9 +536,6 @@ static int rpmsg_send_offchannel_raw(struct rpmsg_device *rpdev,
595536 (msg = get_a_tx_buf (vrp )),
596537 msecs_to_jiffies (15000 ));
597538
598- /* disable "tx-complete" interrupts if we're the last sleeper */
599- rpmsg_downref_sleepers (vrp );
600-
601539 /* timeout ? */
602540 if (!err ) {
603541 dev_err (dev , "timeout waiting for a tx buffer\n" );
@@ -676,6 +614,34 @@ static int virtio_rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data,
676614 return rpmsg_send_offchannel_raw (rpdev , src , dst , data , len , false);
677615}
678616
617+ static __poll_t virtio_rpmsg_poll (struct rpmsg_endpoint * ept , struct file * filp ,
618+ poll_table * wait )
619+ {
620+ struct rpmsg_device * rpdev = ept -> rpdev ;
621+ struct virtio_rpmsg_channel * vch = to_virtio_rpmsg_channel (rpdev );
622+ struct virtproc_info * vrp = vch -> vrp ;
623+ __poll_t mask = 0 ;
624+
625+ poll_wait (filp , & vrp -> sendq , wait );
626+
627+ /* support multiple concurrent senders */
628+ mutex_lock (& vrp -> tx_lock );
629+
630+ /*
631+ * check for a free buffer, either:
632+ * - we haven't used all of the available transmit buffers (half of the
633+ * allocated buffers are used for transmit, hence num_bufs / 2), or,
634+ * - we ask the virtqueue if there's a buffer available
635+ */
636+ if (vrp -> last_sbuf < vrp -> num_bufs / 2 ||
637+ !virtqueue_enable_cb (vrp -> svq ))
638+ mask |= EPOLLOUT ;
639+
640+ mutex_unlock (& vrp -> tx_lock );
641+
642+ return mask ;
643+ }
644+
679645static ssize_t virtio_rpmsg_get_mtu (struct rpmsg_endpoint * ept )
680646{
681647 struct rpmsg_device * rpdev = ept -> rpdev ;
@@ -922,9 +888,6 @@ static int rpmsg_probe(struct virtio_device *vdev)
922888 WARN_ON (err ); /* sanity check; this can't really happen */
923889 }
924890
925- /* suppress "tx-complete" interrupts */
926- virtqueue_disable_cb (vrp -> svq );
927-
928891 vdev -> priv = vrp ;
929892
930893 rpdev_ctrl = rpmsg_virtio_add_ctrl_dev (vdev );
0 commit comments