/******************************************************************************
 * drivers/xen/tpmback/tpmback.c
 *
 * Copyright (c) 2005, IBM Corporation
 *
 * Author: Stefan Berger, stefanb@us.ibm.com
 * Grant table support: Mahadevan Gomathisankaran
 *
 * This code has been derived from drivers/xen/netback/netback.c
 * Copyright (c) 2002-2004, K A Fraser
 *
 */

#include "common.h"
#include <xen/evtchn.h>

#include <linux/types.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/poll.h>
#include <asm/uaccess.h>
#include <xen/xenbus.h>
#include <xen/interface/grant_table.h>
#include <xen/gnttab.h>

/* local data structures */
struct data_exchange {
	struct list_head pending_pak;
	struct list_head current_pak;
	unsigned int copied_so_far;
	u8 has_opener:1;
	u8 aborted:1;
	rwlock_t pak_lock;	/* protects all of the previous fields */
	wait_queue_head_t wait_queue;
};

struct vtpm_resp_hdr {
	uint32_t instance_no;
	uint16_t tag_no;
	uint32_t len_no;
	uint32_t ordinal_no;
} __attribute__ ((packed));

struct packet {
	struct list_head next;
	unsigned int data_len;
	u8 *data_buffer;
	tpmif_t *tpmif;
	u32 tpm_instance;
	u8 req_tag;
	u32 last_read;
	u8 flags;
	struct timer_list processing_timer;
};

enum {
	PACKET_FLAG_DISCARD_RESPONSE = 1,
};

/* local variables */
static struct data_exchange dataex;

/* local function prototypes */
static int _packet_write(struct packet *pak,
			 const char *data, size_t size, int isuserbuffer);
static void processing_timeout(unsigned long ptr);
static int packet_read_shmem(struct packet *pak,
			     tpmif_t * tpmif,
			     u32 offset,
			     char *buffer, int isuserbuffer, u32 left);
static int vtpm_queue_packet(struct packet *pak);

/***************************************************************
 Buffer copying for user and kernel space buffers.
***************************************************************/
static inline int copy_from_buffer(void *to,
				   const void *from, unsigned long size,
				   int isuserbuffer)
{
	if (isuserbuffer) {
		if (copy_from_user(to, (void __user *)from, size))
			return -EFAULT;
	} else {
		memcpy(to, from, size);
	}
	return 0;
}

static inline int copy_to_buffer(void *to,
				 const void *from, unsigned long size,
				 int isuserbuffer)
{
	if (isuserbuffer) {
		if (copy_to_user((void __user *)to, from, size))
			return -EFAULT;
	} else {
		memcpy(to, from, size);
	}
	return 0;
}

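/*
 * Initialize the state shared between the /dev/vtpm reader and the
 * per-frontend packet queues.
 */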
static void dataex_init(struct data_exchange *dataex)
{
	INIT_LIST_HEAD(&dataex->pending_pak);
	INIT_LIST_HEAD(&dataex->current_pak);
	dataex->has_opener = 0;
	rwlock_init(&dataex->pak_lock);
	init_waitqueue_head(&dataex->wait_queue);
}

/***************************************************************
 Packet-related functions
***************************************************************/

static struct packet *packet_find_instance(struct list_head *head,
					   u32 tpm_instance)
{
	struct packet *pak;
	struct list_head *p;

	/*
	 * traverse the list of packets and return the first
	 * one with the given instance number
	 */
	list_for_each(p, head) {
		pak = list_entry(p, struct packet, next);

		if (pak->tpm_instance == tpm_instance) {
			return pak;
		}
	}
	return NULL;
}

static struct packet *packet_find_packet(struct list_head *head, void *packet)
{
	struct packet *pak;
	struct list_head *p;

	/*
	 * traverse the list of packets and return the first
	 * one that matches the given packet pointer
	 */
	list_for_each(p, head) {
		pak = list_entry(p, struct packet, next);

		if (pak == packet) {
			return pak;
		}
	}
	return NULL;
}

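/*
 * Allocate and initialize a packet. GFP_ATOMIC is used because packets
 * may be allocated from tasklet context (tpm_tx_action -> vtpm_receive).
 */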
static struct packet *packet_alloc(tpmif_t * tpmif,
				   u32 size, u8 req_tag, u8 flags)
{
	struct packet *pak = NULL;
	pak = kzalloc(sizeof (struct packet), GFP_ATOMIC);
	if (NULL != pak) {
		if (tpmif) {
			pak->tpmif = tpmif;
			pak->tpm_instance = tpmback_get_instance(tpmif->bi);
			tpmif_get(tpmif);
		}
		pak->data_len = size;
		pak->req_tag = req_tag;
		pak->last_read = 0;
		pak->flags = flags;

		init_timer(&pak->processing_timer);
		pak->processing_timer.function = processing_timeout;
		pak->processing_timer.data = (unsigned long)pak;
	}
	return pak;
}

static inline void packet_reset(struct packet *pak)
{
	pak->last_read = 0;
}

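/*
 * Free a packet and drop the interface reference taken in packet_alloc().
 * The caller must have stopped the processing timer first.
 */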
static void packet_free(struct packet *pak)
{
	if (timer_pending(&pak->processing_timer)) {
		BUG();
	}

	if (pak->tpmif)
		tpmif_put(pak->tpmif);
	kfree(pak->data_buffer);
	kfree(pak);
}

/*
 * Write data to the shared memory and send it to the FE.
 */
static int packet_write(struct packet *pak,
			const char *data, size_t size, int isuserbuffer)
{
	int rc = 0;

	if (0 != (pak->flags & PACKET_FLAG_DISCARD_RESPONSE)) {
		/* Don't send a response to this packet. Just acknowledge it. */
		rc = size;
	} else {
		rc = _packet_write(pak, data, size, isuserbuffer);
	}

	return rc;
}

static int _packet_write(struct packet *pak,
			 const char *data, size_t size, int isuserbuffer)
{
	/*
	 * Write into the shared memory pages directly
	 * and send it to the front end.
	 */
	tpmif_t *tpmif = pak->tpmif;
	grant_handle_t handle;
	int rc = 0;
	unsigned int i = 0;
	unsigned int offset = 0;

	if (tpmif == NULL) {
		return -EFAULT;
	}

	if (tpmif->status == DISCONNECTED) {
		return size;
	}

	while (offset < size && i < TPMIF_TX_RING_SIZE) {
		unsigned int tocopy;
		struct gnttab_map_grant_ref map_op;
		struct gnttab_unmap_grant_ref unmap_op;
		tpmif_tx_request_t *tx;

		tx = &tpmif->tx->ring[i].req;

		if (0 == tx->addr) {
			DPRINTK("ERROR: Buffer for outgoing packet NULL?! i=%d\n", i);
			return 0;
		}

		gnttab_set_map_op(&map_op, idx_to_kaddr(tpmif, i),
				  GNTMAP_host_map, tx->ref, tpmif->domid);

		if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
						       &map_op, 1))) {
			BUG();
		}

		handle = map_op.handle;

		if (map_op.status) {
			DPRINTK(" Grant table operation failure !\n");
			return 0;
		}

		tocopy = min_t(size_t, size - offset, PAGE_SIZE);

		if (copy_from_buffer((void *)(idx_to_kaddr(tpmif, i) |
					      (tx->addr & ~PAGE_MASK)),
				     &data[offset], tocopy, isuserbuffer)) {
			tpmif_put(tpmif);
			return -EFAULT;
		}
		tx->size = tocopy;

		gnttab_set_unmap_op(&unmap_op, idx_to_kaddr(tpmif, i),
				    GNTMAP_host_map, handle);

		if (unlikely
		    (HYPERVISOR_grant_table_op
		     (GNTTABOP_unmap_grant_ref, &unmap_op, 1))) {
			BUG();
		}

		offset += tocopy;
		i++;
	}

	rc = offset;
	DPRINTK("Notifying frontend via irq %d\n", tpmif->irq);
	notify_remote_via_irq(tpmif->irq);

	return rc;
}

/*
 * Read data from the shared memory and copy it directly into the
 * provided buffer. Advance the last_read indicator which tells
 * how many bytes have already been read.
 */
static int packet_read(struct packet *pak, size_t numbytes,
		       char *buffer, size_t buffersize, int isuserbuffer)
{
	tpmif_t *tpmif = pak->tpmif;

	/*
	 * Read 'numbytes' of data from the buffer. The first 4
	 * bytes are the instance number in network byte order,
	 * after that come the data from the shared memory buffer.
	 */
	u32 to_copy;
	u32 offset = 0;
	u32 room_left = buffersize;

	if (pak->last_read < 4) {
		/*
		 * copy the instance number into the buffer
		 */
		u32 instance_no = htonl(pak->tpm_instance);
		u32 last_read = pak->last_read;

		to_copy = min_t(size_t, 4 - last_read, numbytes);

		if (copy_to_buffer(&buffer[0],
				   &(((u8 *) &instance_no)[last_read]),
				   to_copy, isuserbuffer)) {
			return -EFAULT;
		}

		pak->last_read += to_copy;
		offset += to_copy;
		room_left -= to_copy;
	}

	/*
	 * If the packet has a data buffer appended, read from it...
	 */

	if (room_left > 0) {
		if (pak->data_buffer) {
			u32 to_copy = min_t(u32, pak->data_len - offset, room_left);
			u32 last_read = pak->last_read - 4;

			if (copy_to_buffer(&buffer[offset],
					   &pak->data_buffer[last_read],
					   to_copy, isuserbuffer)) {
				return -EFAULT;
			}
			pak->last_read += to_copy;
			offset += to_copy;
		} else {
			offset = packet_read_shmem(pak,
						   tpmif,
						   offset,
						   buffer,
						   isuserbuffer, room_left);
		}
	}
	return offset;
}

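/*
 * Read the packet contents directly out of the frontend's granted ring
 * pages, mapping and unmapping one page at a time.
 */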
static int packet_read_shmem(struct packet *pak,
			     tpmif_t * tpmif,
			     u32 offset, char *buffer, int isuserbuffer,
			     u32 room_left)
{
	u32 last_read = pak->last_read - 4;
	u32 i = (last_read / PAGE_SIZE);
	u32 pg_offset = last_read & (PAGE_SIZE - 1);
	u32 to_copy;
	grant_handle_t handle;

	tpmif_tx_request_t *tx;

	tx = &tpmif->tx->ring[0].req;
	/*
	 * Start copying data at the page with index 'i'
	 * and within that page at offset 'pg_offset'.
	 * Copy a maximum of 'room_left' bytes.
	 */
	to_copy = min_t(u32, PAGE_SIZE - pg_offset, room_left);
	while (to_copy > 0) {
		void *src;
		struct gnttab_map_grant_ref map_op;
		struct gnttab_unmap_grant_ref unmap_op;

		tx = &tpmif->tx->ring[i].req;

		gnttab_set_map_op(&map_op, idx_to_kaddr(tpmif, i),
				  GNTMAP_host_map, tx->ref, tpmif->domid);

		if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
						       &map_op, 1))) {
			BUG();
		}

		if (map_op.status) {
			DPRINTK(" Grant table operation failure !\n");
			return -EFAULT;
		}

		handle = map_op.handle;

		if (to_copy > tx->size) {
			/*
			 * User requests more than what's available
			 */
			to_copy = min_t(u32, tx->size, to_copy);
		}

		DPRINTK("Copying from mapped memory at %08lx\n",
			(unsigned long)(idx_to_kaddr(tpmif, i) |
					(tx->addr & ~PAGE_MASK)));

		src = (void *)(idx_to_kaddr(tpmif, i) |
			       ((tx->addr & ~PAGE_MASK) + pg_offset));
		if (copy_to_buffer(&buffer[offset],
				   src, to_copy, isuserbuffer)) {
			return -EFAULT;
		}

		DPRINTK("Data from TPM-FE of domain %d are %d %d %d %d\n",
			tpmif->domid, buffer[offset], buffer[offset + 1],
			buffer[offset + 2], buffer[offset + 3]);

		gnttab_set_unmap_op(&unmap_op, idx_to_kaddr(tpmif, i),
				    GNTMAP_host_map, handle);

		if (unlikely
		    (HYPERVISOR_grant_table_op
		     (GNTTABOP_unmap_grant_ref, &unmap_op, 1))) {
			BUG();
		}

		offset += to_copy;
		pg_offset = 0;
		last_read += to_copy;
		room_left -= to_copy;

		to_copy = min_t(u32, PAGE_SIZE, room_left);
		i++;
	}			/* while (to_copy > 0) */
	/*
	 * Adjust the last_read pointer
	 */
	pak->last_read = last_read + 4;
	return offset;
}

/* ============================================================
 * The file layer for reading data from this device
 * ============================================================
 */
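/*
 * Protocol on /dev/vtpm as implemented by the handlers below: read()
 * returns a 4-byte virtual TPM instance number in network byte order
 * followed by the TPM request; write() must carry the same 4-byte
 * instance number followed by the TPM response. A minimal sketch of a
 * userspace consumer loop (hypothetical daemon code, not part of this
 * driver):
 *
 *	n = read(fd, buf, sizeof(buf));     // [instance | TPM request]
 *	// ... hand buf+4 to the TPM emulator, get resp_len bytes back ...
 *	memcpy(resp, buf, 4);               // echo the instance number
 *	write(fd, resp, 4 + resp_len);      // [instance | TPM response]
 */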
static int vtpm_op_open(struct inode *inode, struct file *f)
{
	int rc = 0;
	unsigned long flags;

	write_lock_irqsave(&dataex.pak_lock, flags);
	if (dataex.has_opener == 0) {
		dataex.has_opener = 1;
	} else {
		rc = -EPERM;
	}
	write_unlock_irqrestore(&dataex.pak_lock, flags);
	return rc;
}

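/*
 * Deliver the next pending packet to userspace. Large packets may be
 * consumed across several read() calls; dataex.copied_so_far tracks
 * how much of the current packet has been handed out.
 */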
static ssize_t vtpm_op_read(struct file *file,
			    char __user * data, size_t size, loff_t * offset)
{
	int ret_size = -ENODATA;
	struct packet *pak = NULL;
	unsigned long flags;

	write_lock_irqsave(&dataex.pak_lock, flags);
	if (dataex.aborted) {
		dataex.aborted = 0;
		dataex.copied_so_far = 0;
		write_unlock_irqrestore(&dataex.pak_lock, flags);
		return -EIO;
	}

	if (list_empty(&dataex.pending_pak)) {
		write_unlock_irqrestore(&dataex.pak_lock, flags);
		wait_event_interruptible(dataex.wait_queue,
					 !list_empty(&dataex.pending_pak));
		write_lock_irqsave(&dataex.pak_lock, flags);
		dataex.copied_so_far = 0;
	}

	if (!list_empty(&dataex.pending_pak)) {
		unsigned int left;

		pak = list_entry(dataex.pending_pak.next, struct packet, next);
		left = pak->data_len - dataex.copied_so_far;
		list_del(&pak->next);
		write_unlock_irqrestore(&dataex.pak_lock, flags);

		DPRINTK("size given by app: %zu, available: %u\n", size, left);

		ret_size = min_t(size_t, size, left);

		ret_size = packet_read(pak, ret_size, data, size, 1);

		write_lock_irqsave(&dataex.pak_lock, flags);

		if (ret_size < 0) {
			del_singleshot_timer_sync(&pak->processing_timer);
			packet_free(pak);
			dataex.copied_so_far = 0;
		} else {
			DPRINTK("Copied %d bytes to user buffer\n", ret_size);

			dataex.copied_so_far += ret_size;
			if (dataex.copied_so_far >= pak->data_len + 4) {
				DPRINTK("All data from this packet given to app.\n");
				/* All data given to app */

				del_singleshot_timer_sync(&pak->
							  processing_timer);
				list_add_tail(&pak->next, &dataex.current_pak);
				/*
				 * The more frontends that are handled at the same time,
				 * the more time we give the TPM to process the request.
				 */
				mod_timer(&pak->processing_timer,
					  jiffies + (num_frontends * 60 * HZ));
				dataex.copied_so_far = 0;
			} else {
				list_add(&pak->next, &dataex.pending_pak);
			}
		}
	}
	write_unlock_irqrestore(&dataex.pak_lock, flags);

	DPRINTK("Returning result from read to app: %d\n", ret_size);

	return ret_size;
}

/*
 * Write operation - only works after a previous read operation!
 */
static ssize_t vtpm_op_write(struct file *file,
			     const char __user * data, size_t size,
			     loff_t * offset)
{
	struct packet *pak;
	int rc = 0;
	unsigned int off = 4;
	unsigned long flags;
	struct vtpm_resp_hdr vrh;

	/*
	 * Minimum required packet size is:
	 * 4 bytes for instance number
	 * 2 bytes for tag
	 * 4 bytes for paramSize
	 * 4 bytes for the ordinal
	 * sum: 14 bytes
	 */
	if (size < sizeof (vrh))
		return -EFAULT;

	if (copy_from_user(&vrh, data, sizeof (vrh)))
		return -EFAULT;

	/* malformed packet? */
	if ((off + ntohl(vrh.len_no)) != size)
		return -EFAULT;

	write_lock_irqsave(&dataex.pak_lock, flags);
	pak = packet_find_instance(&dataex.current_pak,
				   ntohl(vrh.instance_no));

	if (pak == NULL) {
		write_unlock_irqrestore(&dataex.pak_lock, flags);
		DPRINTK(KERN_ALERT "No associated packet! (inst=%d)\n",
			ntohl(vrh.instance_no));
		return -EFAULT;
	}

	del_singleshot_timer_sync(&pak->processing_timer);
	list_del(&pak->next);

	write_unlock_irqrestore(&dataex.pak_lock, flags);

	/*
	 * The first 'off' bytes must be the instance number - skip them.
	 */
	size -= off;

	rc = packet_write(pak, &data[off], size, 1);

	if (rc > 0) {
		/* account for the 'off' instance-number bytes skipped above */
		rc += off;
	}
	packet_free(pak);
	return rc;
}

static int vtpm_op_release(struct inode *inode, struct file *file)
{
	unsigned long flags;

	vtpm_release_packets(NULL, 1);
	write_lock_irqsave(&dataex.pak_lock, flags);
	dataex.has_opener = 0;
	write_unlock_irqrestore(&dataex.pak_lock, flags);
	return 0;
}

static unsigned int vtpm_op_poll(struct file *file,
				 struct poll_table_struct *pts)
{
	unsigned int flags = POLLOUT | POLLWRNORM;

	poll_wait(file, &dataex.wait_queue, pts);
	if (!list_empty(&dataex.pending_pak)) {
		flags |= POLLIN | POLLRDNORM;
	}
	return flags;
}

static struct file_operations vtpm_ops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.open = vtpm_op_open,
	.read = vtpm_op_read,
	.write = vtpm_op_write,
	.release = vtpm_op_release,
	.poll = vtpm_op_poll,
};

static struct miscdevice vtpms_miscdevice = {
	.minor = 225,
	.name = "vtpm",
	.fops = &vtpm_ops,
};

/***************************************************************
 Utility functions
***************************************************************/

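/*
 * Build a canned TPM_FAIL response matching the given request tag and
 * write it back to the frontend.
 */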
static int tpm_send_fail_message(struct packet *pak, u8 req_tag)
{
	int rc;
	static const unsigned char tpm_error_message_fail[] = {
		0x00, 0x00,
		0x00, 0x00, 0x00, 0x0a,
		0x00, 0x00, 0x00, 0x09	/* TPM_FAIL */
	};
	unsigned char buffer[sizeof (tpm_error_message_fail)];

	memcpy(buffer, tpm_error_message_fail,
	       sizeof (tpm_error_message_fail));
	/*
	 * Insert the right response tag depending on the given tag.
	 * All response tags are '+3' to the request tag.
	 */
	buffer[1] = req_tag + 3;

	/*
	 * Write the data to shared memory and notify the front-end
	 */
	rc = packet_write(pak, buffer, sizeof (buffer), 0);

	return rc;
}

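/*
 * Remove all packets belonging to 'tpmif' (all packets if tpmif is
 * NULL) from the given list, optionally sending TPM_FAIL responses to
 * still-connected frontends. Returns 1 if the head of the list was
 * removed, i.e. a read in progress was aborted.
 */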
static int _vtpm_release_packets(struct list_head *head,
				 tpmif_t * tpmif, int send_msgs)
{
	int aborted = 0;
	int c = 0;
	struct packet *pak;
	struct list_head *pos, *tmp;

	list_for_each_safe(pos, tmp, head) {
		pak = list_entry(pos, struct packet, next);
		c += 1;

		if (tpmif == NULL || pak->tpmif == tpmif) {
			int can_send = 0;

			del_singleshot_timer_sync(&pak->processing_timer);
			list_del(&pak->next);

			if (pak->tpmif && pak->tpmif->status == CONNECTED) {
				can_send = 1;
			}

			if (send_msgs && can_send) {
				tpm_send_fail_message(pak, pak->req_tag);
			}
			packet_free(pak);
			if (c == 1)
				aborted = 1;
		}
	}
	return aborted;
}

int vtpm_release_packets(tpmif_t * tpmif, int send_msgs)
{
	unsigned long flags;

	write_lock_irqsave(&dataex.pak_lock, flags);

	dataex.aborted = _vtpm_release_packets(&dataex.pending_pak,
					       tpmif,
					       send_msgs);
	_vtpm_release_packets(&dataex.current_pak, tpmif, send_msgs);

	write_unlock_irqrestore(&dataex.pak_lock, flags);
	return 0;
}

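/*
 * Queue a request packet for the userspace handler and arm the
 * processing timer; fails with -EFAULT if nobody has the device open.
 */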
static int vtpm_queue_packet(struct packet *pak)
{
	int rc = 0;

	if (dataex.has_opener) {
		unsigned long flags;

		write_lock_irqsave(&dataex.pak_lock, flags);
		list_add_tail(&pak->next, &dataex.pending_pak);
		/* give the TPM some time to pick up the request */
		mod_timer(&pak->processing_timer, jiffies + (30 * HZ));
		write_unlock_irqrestore(&dataex.pak_lock, flags);

		wake_up_interruptible(&dataex.wait_queue);
	} else {
		rc = -EFAULT;
	}
	return rc;
}

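/*
 * Called from the tx tasklet when a frontend has posted a request:
 * wrap the ring contents in a packet, sanity-check the TPM header and
 * queue it for userspace.
 */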
static int vtpm_receive(tpmif_t * tpmif, u32 size)
{
	int rc = 0;
	unsigned char buffer[10];
	__be32 *native_size;
	struct packet *pak = packet_alloc(tpmif, size, 0, 0);

	if (!pak)
		return -ENOMEM;
	/*
	 * Read 10 bytes from the received buffer to test its
	 * content for validity.
	 */
	if (sizeof (buffer) != packet_read(pak,
					   sizeof (buffer), buffer,
					   sizeof (buffer), 0)) {
		goto failexit;
	}
	/*
	 * Reset the packet read pointer so we can read all its
	 * contents again.
	 */
	packet_reset(pak);

	native_size = (__force __be32 *) (&buffer[4 + 2]);
	/*
	 * Verify that the size of the packet is correct
	 * as indicated and that there's actually someone reading packets.
	 * The minimum size of the packet is '10' for tag, size indicator
	 * and ordinal.
	 */
	if (size < 10 ||
	    be32_to_cpu(*native_size) != size ||
	    0 == dataex.has_opener || tpmif->status != CONNECTED) {
		rc = -EINVAL;
		goto failexit;
	} else {
		rc = vtpm_queue_packet(pak);
		if (rc < 0)
			goto failexit;
	}
	return 0;

failexit:
	if (pak) {
		tpm_send_fail_message(pak, buffer[4 + 1]);
		packet_free(pak);
	}
	return rc;
}

/*
 * Timeout function that gets invoked when a packet has not been processed
 * during the timeout period.
 * The packet must be on a list when this function is invoked. This
 * also means that once it's taken off a list, the timer must be
 * destroyed as well.
 */
static void processing_timeout(unsigned long ptr)
{
	struct packet *pak = (struct packet *)ptr;
	unsigned long flags;

	write_lock_irqsave(&dataex.pak_lock, flags);
	/*
	 * Check whether the packet is still on one of the lists.
	 */
	if (pak == packet_find_packet(&dataex.pending_pak, pak) ||
	    pak == packet_find_packet(&dataex.current_pak, pak)) {
		if ((pak->flags & PACKET_FLAG_DISCARD_RESPONSE) == 0) {
			tpm_send_fail_message(pak, pak->req_tag);
		}
		/* discard future responses */
		pak->flags |= PACKET_FLAG_DISCARD_RESPONSE;
	}

	write_unlock_irqrestore(&dataex.pak_lock, flags);
}

static void tpm_tx_action(unsigned long unused);
static DECLARE_TASKLET(tpm_tx_tasklet, tpm_tx_action, 0);

static struct list_head tpm_schedule_list;
static spinlock_t tpm_schedule_list_lock;

static inline void maybe_schedule_tx_action(void)
{
	smp_mb();
	tasklet_schedule(&tpm_tx_tasklet);
}

static inline int __on_tpm_schedule_list(tpmif_t * tpmif)
{
	return tpmif->list.next != NULL;
}

static void remove_from_tpm_schedule_list(tpmif_t * tpmif)
{
	spin_lock_irq(&tpm_schedule_list_lock);
	if (likely(__on_tpm_schedule_list(tpmif))) {
		list_del(&tpmif->list);
		tpmif->list.next = NULL;
		tpmif_put(tpmif);
	}
	spin_unlock_irq(&tpm_schedule_list_lock);
}

static void add_to_tpm_schedule_list_tail(tpmif_t * tpmif)
{
	if (__on_tpm_schedule_list(tpmif))
		return;

	spin_lock_irq(&tpm_schedule_list_lock);
	if (!__on_tpm_schedule_list(tpmif) && tpmif->active) {
		list_add_tail(&tpmif->list, &tpm_schedule_list);
		tpmif_get(tpmif);
	}
	spin_unlock_irq(&tpm_schedule_list_lock);
}

void tpmif_schedule_work(tpmif_t * tpmif)
{
	add_to_tpm_schedule_list_tail(tpmif);
	maybe_schedule_tx_action();
}

void tpmif_deschedule_work(tpmif_t * tpmif)
{
	remove_from_tpm_schedule_list(tpmif);
}

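/*
 * Tasklet: drain the schedule list and pass each frontend's pending
 * request up via vtpm_receive().
 */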
static void tpm_tx_action(unsigned long unused)
{
	struct list_head *ent;
	tpmif_t *tpmif;
	tpmif_tx_request_t *tx;

	DPRINTK("%s: Getting data from front-end(s)!\n", __FUNCTION__);

	while (!list_empty(&tpm_schedule_list)) {
		/* Get a tpmif from the list with work to do. */
		ent = tpm_schedule_list.next;
		tpmif = list_entry(ent, tpmif_t, list);
		tpmif_get(tpmif);
		remove_from_tpm_schedule_list(tpmif);

		tx = &tpmif->tx->ring[0].req;

		/* pass it up */
		vtpm_receive(tpmif, tx->size);

		tpmif_put(tpmif);
	}
}

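/*
 * Interrupt handler: a frontend signaled over its event channel that
 * new work is available on the ring.
 */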
irqreturn_t tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs)
{
	tpmif_t *tpmif = (tpmif_t *) dev_id;

	add_to_tpm_schedule_list_tail(tpmif);
	maybe_schedule_tx_action();
	return IRQ_HANDLED;
}

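/*
 * Module init: register the /dev/vtpm misc device, initialize the data
 * exchange and scheduling state, and hook into xenbus.
 */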
static int __init tpmback_init(void)
{
	int rc;

	if ((rc = misc_register(&vtpms_miscdevice)) != 0) {
		printk(KERN_ALERT
		       "Could not register misc device for TPM BE.\n");
		return rc;
	}

	dataex_init(&dataex);

	spin_lock_init(&tpm_schedule_list_lock);
	INIT_LIST_HEAD(&tpm_schedule_list);

	tpmif_interface_init();
	tpmif_xenbus_init();

	printk(KERN_INFO "Successfully initialized TPM backend driver.\n");

	return 0;
}

module_init(tpmback_init);

void __exit tpmback_exit(void)
{
	vtpm_release_packets(NULL, 0);
	tpmif_xenbus_exit();
	tpmif_interface_exit();
	misc_deregister(&vtpms_miscdevice);
}

MODULE_LICENSE("Dual BSD/GPL");