Skip to content

Commit 557602f

Browse files
committed
ipmi:msghandler: Deliver user messages in a work queue
This simplifies the locking and lets us remove some weird event handling code. deliver_response() and friends can now be called from an atomic context, since delivery is deferred to the work queue. Signed-off-by: Corey Minyard <cminyard@mvista.com>
1 parent 7422198 commit 557602f

1 file changed

Lines changed: 25 additions & 16 deletions

File tree

drivers/char/ipmi/ipmi_msghandler.c

Lines changed: 25 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -495,6 +495,12 @@ struct ipmi_smi {
495495
struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
496496
int curr_seq;
497497

498+
/*
499+
* Messages queued for delivery to the user.
500+
*/
501+
struct mutex user_msgs_mutex;
502+
struct list_head user_msgs;
503+
498504
/*
499505
* Messages queued for delivery. If delivery fails (out of memory
500506
* for instance), they will stay in here to be processed later in a
@@ -525,7 +531,6 @@ struct ipmi_smi {
525531
spinlock_t events_lock; /* For dealing with event stuff. */
526532
struct list_head waiting_events;
527533
unsigned int waiting_events_count; /* How many events in queue? */
528-
char delivering_events;
529534
char event_msg_printed;
530535

531536
/* How many users are waiting for events? */
@@ -945,9 +950,13 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
945950
struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
946951

947952
if (user) {
948-
atomic_dec(&user->nr_msgs);
949-
user->handler->ipmi_recv_hndl(msg, user->handler_data);
953+
/* Deliver it in smi_work. */
954+
kref_get(&user->refcount);
955+
mutex_lock(&intf->user_msgs_mutex);
956+
list_add_tail(&msg->link, &intf->user_msgs);
957+
mutex_unlock(&intf->user_msgs_mutex);
950958
release_ipmi_user(user, index);
959+
queue_work(system_bh_wq, &intf->smi_work);
951960
} else {
952961
/* User went away, give up. */
953962
ipmi_free_recv_msg(msg);
@@ -1610,13 +1619,6 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val)
16101619
atomic_dec(&intf->event_waiters);
16111620
}
16121621

1613-
if (intf->delivering_events)
1614-
/*
1615-
* Another thread is delivering events for this, so
1616-
* let it handle any new events.
1617-
*/
1618-
goto out;
1619-
16201622
/* Deliver any queued events. */
16211623
while (user->gets_events && !list_empty(&intf->waiting_events)) {
16221624
list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
@@ -1627,17 +1629,11 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val)
16271629
intf->event_msg_printed = 0;
16281630
}
16291631

1630-
intf->delivering_events = 1;
1631-
spin_unlock_irqrestore(&intf->events_lock, flags);
1632-
16331632
list_for_each_entry_safe(msg, msg2, &msgs, link) {
16341633
msg->user = user;
16351634
kref_get(&user->refcount);
16361635
deliver_local_response(intf, msg);
16371636
}
1638-
1639-
spin_lock_irqsave(&intf->events_lock, flags);
1640-
intf->delivering_events = 0;
16411637
}
16421638

16431639
out:
@@ -3590,6 +3586,8 @@ int ipmi_add_smi(struct module *owner,
35903586
}
35913587
if (slave_addr != 0)
35923588
intf->addrinfo[0].address = slave_addr;
3589+
INIT_LIST_HEAD(&intf->user_msgs);
3590+
mutex_init(&intf->user_msgs_mutex);
35933591
INIT_LIST_HEAD(&intf->users);
35943592
atomic_set(&intf->nr_users, 0);
35953593
intf->handlers = handlers;
@@ -4814,6 +4812,7 @@ static void smi_work(struct work_struct *t)
48144812
struct ipmi_smi *intf = from_work(intf, t, smi_work);
48154813
int run_to_completion = READ_ONCE(intf->run_to_completion);
48164814
struct ipmi_smi_msg *newmsg = NULL;
4815+
struct ipmi_recv_msg *msg, *msg2;
48174816

48184817
/*
48194818
* Start the next message if available.
@@ -4851,6 +4850,16 @@ static void smi_work(struct work_struct *t)
48514850
rcu_read_unlock();
48524851

48534852
handle_new_recv_msgs(intf);
4853+
4854+
mutex_lock(&intf->user_msgs_mutex);
4855+
list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) {
4856+
struct ipmi_user *user = msg->user;
4857+
4858+
atomic_dec(&user->nr_msgs);
4859+
user->handler->ipmi_recv_hndl(msg, user->handler_data);
4860+
kref_put(&user->refcount, free_user);
4861+
}
4862+
mutex_unlock(&intf->user_msgs_mutex);
48544863
}
48554864

48564865
/* Handle a new message from the lower layer. */

0 commit comments

Comments
 (0)