/* krxiod.c: Rx I/O daemon
 *
 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
12 #include <linux/sched.h>
13 #include <linux/completion.h>
14 #include <linux/spinlock.h>
15 #include <linux/init.h>
16 #include <rxrpc/krxiod.h>
17 #include <rxrpc/transport.h>
18 #include <rxrpc/peer.h>
19 #include <rxrpc/call.h>
22 static DECLARE_WAIT_QUEUE_HEAD(rxrpc_krxiod_sleepq);
23 static DECLARE_COMPLETION(rxrpc_krxiod_dead);
25 static atomic_t rxrpc_krxiod_qcount = ATOMIC_INIT(0);
27 static LIST_HEAD(rxrpc_krxiod_transportq);
28 static DEFINE_SPINLOCK(rxrpc_krxiod_transportq_lock);
30 static LIST_HEAD(rxrpc_krxiod_callq);
31 static DEFINE_SPINLOCK(rxrpc_krxiod_callq_lock);
33 static volatile int rxrpc_krxiod_die;
35 /*****************************************************************************/
39 static int rxrpc_krxiod(void *arg)
41 DECLARE_WAITQUEUE(krxiod,current);
43 printk("Started krxiod %d\n",current->pid);
47 /* loop around waiting for work to do */
49 /* wait for work or to be told to exit */
50 _debug("### Begin Wait");
51 if (!atomic_read(&rxrpc_krxiod_qcount)) {
52 set_current_state(TASK_INTERRUPTIBLE);
54 add_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);
57 set_current_state(TASK_INTERRUPTIBLE);
58 if (atomic_read(&rxrpc_krxiod_qcount) ||
60 signal_pending(current))
66 remove_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);
67 set_current_state(TASK_RUNNING);
69 _debug("### End Wait");
71 /* do work if been given some to do */
72 _debug("### Begin Work");
74 /* see if there's a transport in need of attention */
75 if (!list_empty(&rxrpc_krxiod_transportq)) {
76 struct rxrpc_transport *trans = NULL;
78 spin_lock_irq(&rxrpc_krxiod_transportq_lock);
80 if (!list_empty(&rxrpc_krxiod_transportq)) {
82 rxrpc_krxiod_transportq.next,
83 struct rxrpc_transport,
86 list_del_init(&trans->krxiodq_link);
87 atomic_dec(&rxrpc_krxiod_qcount);
89 /* make sure it hasn't gone away and doesn't go
91 if (atomic_read(&trans->usage)>0)
92 rxrpc_get_transport(trans);
97 spin_unlock_irq(&rxrpc_krxiod_transportq_lock);
100 rxrpc_trans_receive_packet(trans);
101 rxrpc_put_transport(trans);
105 /* see if there's a call in need of attention */
106 if (!list_empty(&rxrpc_krxiod_callq)) {
107 struct rxrpc_call *call = NULL;
109 spin_lock_irq(&rxrpc_krxiod_callq_lock);
111 if (!list_empty(&rxrpc_krxiod_callq)) {
112 call = list_entry(rxrpc_krxiod_callq.next,
115 list_del_init(&call->rcv_krxiodq_lk);
116 atomic_dec(&rxrpc_krxiod_qcount);
118 /* make sure it hasn't gone away and doesn't go
120 if (atomic_read(&call->usage) > 0) {
122 " Begin Attend Call %p", call);
123 rxrpc_get_call(call);
130 spin_unlock_irq(&rxrpc_krxiod_callq_lock);
133 rxrpc_call_do_stuff(call);
134 rxrpc_put_call(call);
135 _debug("@@@ KRXIOD End Attend Call %p", call);
139 _debug("### End Work");
141 /* discard pending signals */
142 rxrpc_discard_my_signals();
144 } while (!rxrpc_krxiod_die);
147 complete_and_exit(&rxrpc_krxiod_dead, 0);
149 } /* end rxrpc_krxiod() */
151 /*****************************************************************************/
153 * start up a krxiod daemon
155 int __init rxrpc_krxiod_init(void)
157 return kernel_thread(rxrpc_krxiod, NULL, 0);
159 } /* end rxrpc_krxiod_init() */
161 /*****************************************************************************/
163 * kill the krxiod daemon and wait for it to complete
165 void rxrpc_krxiod_kill(void)
167 rxrpc_krxiod_die = 1;
168 wake_up_all(&rxrpc_krxiod_sleepq);
169 wait_for_completion(&rxrpc_krxiod_dead);
171 } /* end rxrpc_krxiod_kill() */
173 /*****************************************************************************/
175 * queue a transport for attention by krxiod
177 void rxrpc_krxiod_queue_transport(struct rxrpc_transport *trans)
183 if (list_empty(&trans->krxiodq_link)) {
184 spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags);
186 if (list_empty(&trans->krxiodq_link)) {
187 if (atomic_read(&trans->usage) > 0) {
188 list_add_tail(&trans->krxiodq_link,
189 &rxrpc_krxiod_transportq);
190 atomic_inc(&rxrpc_krxiod_qcount);
194 spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags);
195 wake_up_all(&rxrpc_krxiod_sleepq);
200 } /* end rxrpc_krxiod_queue_transport() */
202 /*****************************************************************************/
204 * dequeue a transport from krxiod's attention queue
206 void rxrpc_krxiod_dequeue_transport(struct rxrpc_transport *trans)
212 spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags);
213 if (!list_empty(&trans->krxiodq_link)) {
214 list_del_init(&trans->krxiodq_link);
215 atomic_dec(&rxrpc_krxiod_qcount);
217 spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags);
221 } /* end rxrpc_krxiod_dequeue_transport() */
223 /*****************************************************************************/
225 * queue a call for attention by krxiod
227 void rxrpc_krxiod_queue_call(struct rxrpc_call *call)
231 if (list_empty(&call->rcv_krxiodq_lk)) {
232 spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags);
233 if (atomic_read(&call->usage) > 0) {
234 list_add_tail(&call->rcv_krxiodq_lk,
235 &rxrpc_krxiod_callq);
236 atomic_inc(&rxrpc_krxiod_qcount);
238 spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags);
240 wake_up_all(&rxrpc_krxiod_sleepq);
242 } /* end rxrpc_krxiod_queue_call() */
244 /*****************************************************************************/
246 * dequeue a call from krxiod's attention queue
248 void rxrpc_krxiod_dequeue_call(struct rxrpc_call *call)
252 spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags);
253 if (!list_empty(&call->rcv_krxiodq_lk)) {
254 list_del_init(&call->rcv_krxiodq_lk);
255 atomic_dec(&rxrpc_krxiod_qcount);
257 spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags);
259 } /* end rxrpc_krxiod_dequeue_call() */