/******************************************************************************
 * drivers/xen/tpmback/tpmback.c
 *
 * Copyright (c) 2005, IBM Corporation
 * Author: Stefan Berger, stefanb@us.ibm.com
 * Grant table support: Mahadevan Gomathisankaran
 *
 * This code has been derived from drivers/xen/netback/netback.c
 * Copyright (c) 2002-2004, K A Fraser
 */
#include <xen/evtchn.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/poll.h>
#include <asm/uaccess.h>
#include <xen/xenbus.h>
#include <xen/interface/grant_table.h>
#include <xen/gnttab.h>
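
/*
 * Overview (summarizing the code below): TPM requests arrive from a frontend
 * domain on a shared ring of grant references.  tpmif_be_int() queues the
 * interface and schedules a tasklet; tpm_tx_action() pulls the request off
 * the ring, and vtpm_receive() sanity-checks its header and parks the packet
 * on dataex.pending_pak.  There it waits until the userspace vTPM manager
 * reads it through the misc device (packet_read()/packet_read_shmem() copy
 * the bytes out of the granted pages at that point) and writes the response
 * back, which _packet_write() places into the frontend's pages before
 * notifying the frontend over the event channel.
 */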
/* local data structures */
struct data_exchange {
        struct list_head pending_pak;
        struct list_head current_pak;
        unsigned int copied_so_far;
        rwlock_t pak_lock;      /* protects all of the previous fields */
        wait_queue_head_t wait_queue;

struct vtpm_resp_hdr {
} __attribute__ ((packed));

        struct list_head next;
        unsigned int data_len;
        struct timer_list processing_timer;

        PACKET_FLAG_DISCARD_RESPONSE = 1,

static struct data_exchange dataex;
/* local function prototypes */
static int _packet_write(struct packet *pak,
                         const char *data, size_t size, int isuserbuffer);
static void processing_timeout(unsigned long ptr);
static int packet_read_shmem(struct packet *pak,
                             char *buffer, int isuserbuffer, u32 left);
static int vtpm_queue_packet(struct packet *pak);
/***************************************************************
 Buffer copying for user and kernel space buffers.
***************************************************************/
static inline int copy_from_buffer(void *to,
                                   const void *from, unsigned long size,
                if (copy_from_user(to, (void __user *)from, size))
                memcpy(to, from, size);

static inline int copy_to_buffer(void *to,
                                 const void *from, unsigned long size,
                if (copy_to_user((void __user *)to, from, size))
                memcpy(to, from, size);
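
/*
 * Note on the two helpers above: 'isuserbuffer' selects between
 * copy_from_user()/copy_to_user() and a plain memcpy(), so the same
 * packet_read()/packet_write() paths can serve both the misc-device
 * interface (userspace buffers) and in-kernel callers such as
 * tpm_send_fail_message() (kernel buffers, isuserbuffer == 0).
 */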
static void dataex_init(struct data_exchange *dataex)
        INIT_LIST_HEAD(&dataex->pending_pak);
        INIT_LIST_HEAD(&dataex->current_pak);
        dataex->has_opener = 0;
        rwlock_init(&dataex->pak_lock);
        init_waitqueue_head(&dataex->wait_queue);

/***************************************************************
 Packet-related functions
***************************************************************/

static struct packet *packet_find_instance(struct list_head *head,
        /*
         * traverse the list of packets and return the first
         * one with the given instance number
         */
        list_for_each(p, head) {
                pak = list_entry(p, struct packet, next);
                if (pak->tpm_instance == tpm_instance) {
static struct packet *packet_find_packet(struct list_head *head, void *packet)
        /*
         * traverse the list of packets and return the first
         * one that matches the given packet pointer
         */
        list_for_each(p, head) {
                pak = list_entry(p, struct packet, next);
static struct packet *packet_alloc(tpmif_t * tpmif,
                                   u32 size, u8 req_tag, u8 flags)
        struct packet *pak = NULL;

        pak = kzalloc(sizeof (struct packet), GFP_ATOMIC);
        pak->tpm_instance = tpmback_get_instance(tpmif->bi);
        pak->data_len = size;
        pak->req_tag = req_tag;
        /*
         * cannot do tpmif_get(tpmif); bad things happen
         * on the last tpmif_put()
         */
        init_timer(&pak->processing_timer);
        pak->processing_timer.function = processing_timeout;
        pak->processing_timer.data = (unsigned long)pak;
static inline void packet_reset(struct packet *pak)
static void packet_free(struct packet *pak)
        if (timer_pending(&pak->processing_timer)) {

        tpmif_put(pak->tpmif);
        kfree(pak->data_buffer);
        /*
         * cannot do tpmif_put(pak->tpmif); bad things happen
         * on the last tpmif_put()
         */
/*
 * Write data to the shared memory and send it to the FE.
 */
static int packet_write(struct packet *pak,
                        const char *data, size_t size, int isuserbuffer)
        if (0 != (pak->flags & PACKET_FLAG_DISCARD_RESPONSE)) {
                /* Don't send a response to this packet. Just acknowledge it. */

        rc = _packet_write(pak, data, size, isuserbuffer);
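
/*
 * The helper below walks the frontend's tx ring one slot at a time: for each
 * slot it maps the page named by the grant reference into this domain
 * (GNTTABOP_map_grant_ref), copies up to PAGE_SIZE bytes of the response into
 * it, and unmaps it again (GNTTABOP_unmap_grant_ref).  Once all data has been
 * placed, the frontend is notified through its event-channel irq.
 */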
static int _packet_write(struct packet *pak,
                         const char *data, size_t size, int isuserbuffer)
        /*
         * Write into the shared memory pages directly
         * and send it to the front end.
         */
        tpmif_t *tpmif = pak->tpmif;
        grant_handle_t handle;
        unsigned int offset = 0;

        if (tpmif->status == DISCONNECTED) {

        while (offset < size && i < TPMIF_TX_RING_SIZE) {
                struct gnttab_map_grant_ref map_op;
                struct gnttab_unmap_grant_ref unmap_op;
                tpmif_tx_request_t *tx;

                tx = &tpmif->tx->ring[i].req;
                        DPRINTK("ERROR: Buffer for outgoing packet NULL?! i=%d\n", i);

                gnttab_set_map_op(&map_op, idx_to_kaddr(tpmif, i),
                                  GNTMAP_host_map, tx->ref, tpmif->domid);

                if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,

                handle = map_op.handle;
                        DPRINTK("Grant table operation failure!\n");

                tocopy = min_t(size_t, size - offset, PAGE_SIZE);

                if (copy_from_buffer((void *)(idx_to_kaddr(tpmif, i) |
                                              (tx->addr & ~PAGE_MASK)),
                                     &data[offset], tocopy, isuserbuffer)) {

                gnttab_set_unmap_op(&unmap_op, idx_to_kaddr(tpmif, i),
                                    GNTMAP_host_map, handle);

                    (HYPERVISOR_grant_table_op
                     (GNTTABOP_unmap_grant_ref, &unmap_op, 1))) {

        DPRINTK("Notifying frontend via irq %d\n", tpmif->irq);
        notify_remote_via_irq(tpmif->irq);
/*
 * Read data from the shared memory and copy it directly into the
 * provided buffer. Advance the last_read indicator which tells
 * how many bytes have already been read.
 */
static int packet_read(struct packet *pak, size_t numbytes,
                       char *buffer, size_t buffersize, int isuserbuffer)
        tpmif_t *tpmif = pak->tpmif;
        /*
         * Read 'numbytes' of data from the buffer. The first 4
         * bytes are the instance number in network byte order,
         * after that come the data from the shared memory buffer.
         */
        u32 room_left = buffersize;

        if (pak->last_read < 4) {
                /*
                 * copy the instance number into the buffer
                 */
                u32 instance_no = htonl(pak->tpm_instance);
                u32 last_read = pak->last_read;

                to_copy = min_t(size_t, 4 - last_read, numbytes);

                if (copy_to_buffer(&buffer[0],
                                   &(((u8 *)&instance_no)[last_read]),
                                   to_copy, isuserbuffer)) {

                pak->last_read += to_copy;
                room_left -= to_copy;

        /*
         * If the packet has a data buffer appended, read from it...
         */
        if (pak->data_buffer) {
                u32 to_copy = min_t(u32, pak->data_len - offset, room_left);
                u32 last_read = pak->last_read - 4;

                if (copy_to_buffer(&buffer[offset],
                                   &pak->data_buffer[last_read],
                                   to_copy, isuserbuffer)) {

                pak->last_read += to_copy;

                offset = packet_read_shmem(pak,
                                           isuserbuffer, room_left);
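
/*
 * packet_read_shmem() below covers the case where the request still lives in
 * the frontend's granted pages rather than in a local data_buffer: it maps
 * the pages of the tx ring one by one, copies the requested range into the
 * destination buffer, and unmaps each page again before moving on.
 */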
static int packet_read_shmem(struct packet *pak,
                             u32 offset, char *buffer, int isuserbuffer,
        u32 last_read = pak->last_read - 4;
        u32 i = (last_read / PAGE_SIZE);
        u32 pg_offset = last_read & (PAGE_SIZE - 1);
        grant_handle_t handle;
        tpmif_tx_request_t *tx;

        tx = &tpmif->tx->ring[0].req;
        /*
         * Start copying data at the page with index 'index'
         * and within that page at offset 'offset'.
         * Copy a maximum of 'room_left' bytes.
         */
        to_copy = min_t(u32, PAGE_SIZE - pg_offset, room_left);
        while (to_copy > 0) {
                struct gnttab_map_grant_ref map_op;
                struct gnttab_unmap_grant_ref unmap_op;

                tx = &tpmif->tx->ring[i].req;

                gnttab_set_map_op(&map_op, idx_to_kaddr(tpmif, i),
                                  GNTMAP_host_map, tx->ref, tpmif->domid);

                if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
                        DPRINTK("Grant table operation failure!\n");

                handle = map_op.handle;

                if (to_copy > tx->size) {
                        /*
                         * User requests more than what's available
                         */
                        to_copy = min_t(u32, tx->size, to_copy);

                DPRINTK("Copying from mapped memory at %08lx\n",
                        (unsigned long)(idx_to_kaddr(tpmif, i) |
                                        (tx->addr & ~PAGE_MASK)));

                src = (void *)(idx_to_kaddr(tpmif, i) |
                               ((tx->addr & ~PAGE_MASK) + pg_offset));
                if (copy_to_buffer(&buffer[offset],
                                   src, to_copy, isuserbuffer)) {

                DPRINTK("Data from TPM-FE of domain %d are %d %d %d %d\n",
                        tpmif->domid, buffer[offset], buffer[offset + 1],
                        buffer[offset + 2], buffer[offset + 3]);

                gnttab_set_unmap_op(&unmap_op, idx_to_kaddr(tpmif, i),
                                    GNTMAP_host_map, handle);

                    (HYPERVISOR_grant_table_op
                     (GNTTABOP_unmap_grant_ref, &unmap_op, 1))) {

                last_read += to_copy;
                room_left -= to_copy;

                to_copy = min_t(u32, PAGE_SIZE, room_left);
        }                       /* while (to_copy > 0) */
        /*
         * Adjust the last_read pointer
         */
        pak->last_read = last_read + 4;
/* ============================================================
 * The file layer for reading data from this device
 * ============================================================
 */
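
/*
 * Illustrative only, not part of the driver: a userspace vTPM manager is
 * expected to drive the misc device registered below roughly like the sketch
 * that follows.  The device path, buffer sizes and handle_tpm_command() are
 * placeholders; the 4-byte big-endian instance prefix on both read and write
 * is what vtpm_op_read()/vtpm_op_write() implement.
 *
 *      int fd = open("/dev/vtpm", O_RDWR);
 *      unsigned char req[4096], rsp[4096];
 *      ssize_t n = read(fd, req, sizeof(req));   // 4-byte instance + TPM request
 *      uint32_t instance = ntohl(*(uint32_t *)req);
 *      size_t rsp_len = handle_tpm_command(instance, req + 4, n - 4, rsp + 4);
 *      *(uint32_t *)rsp = htonl(instance);       // prepend the same instance number
 *      write(fd, rsp, 4 + rsp_len);              // response is routed back to the FE
 */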
static int vtpm_op_open(struct inode *inode, struct file *f)
        write_lock_irqsave(&dataex.pak_lock, flags);
        if (dataex.has_opener == 0) {
                dataex.has_opener = 1;
        write_unlock_irqrestore(&dataex.pak_lock, flags);
static ssize_t vtpm_op_read(struct file *file,
                            char __user *data, size_t size, loff_t *offset)
        int ret_size = -ENODATA;
        struct packet *pak = NULL;

        write_lock_irqsave(&dataex.pak_lock, flags);
        if (dataex.aborted) {
                dataex.copied_so_far = 0;
                write_unlock_irqrestore(&dataex.pak_lock, flags);

        if (list_empty(&dataex.pending_pak)) {
                write_unlock_irqrestore(&dataex.pak_lock, flags);
                wait_event_interruptible(dataex.wait_queue,
                                         !list_empty(&dataex.pending_pak));
                write_lock_irqsave(&dataex.pak_lock, flags);
                dataex.copied_so_far = 0;

        if (!list_empty(&dataex.pending_pak)) {
                pak = list_entry(dataex.pending_pak.next, struct packet, next);
                left = pak->data_len - dataex.copied_so_far;
                list_del(&pak->next);
                write_unlock_irqrestore(&dataex.pak_lock, flags);

                DPRINTK("size given by app: %zu, available: %u\n", size, left);

                ret_size = min_t(size_t, size, left);

                ret_size = packet_read(pak, ret_size, data, size, 1);

                write_lock_irqsave(&dataex.pak_lock, flags);
                        del_singleshot_timer_sync(&pak->processing_timer);
                        dataex.copied_so_far = 0;
                        DPRINTK("Copied %d bytes to user buffer\n", ret_size);
                        dataex.copied_so_far += ret_size;
                        if (dataex.copied_so_far >= pak->data_len + 4) {
                                DPRINTK("All data from this packet given to app.\n");
                                /* All data given to app */
                                del_singleshot_timer_sync(&pak->processing_timer);
                                list_add_tail(&pak->next, &dataex.current_pak);
                                /*
                                 * The more frontends that are handled at the same time,
                                 * the more time we give the TPM to process the request.
                                 */
                                mod_timer(&pak->processing_timer,
                                          jiffies + (num_frontends * 60 * HZ));
                                dataex.copied_so_far = 0;
                                list_add(&pak->next, &dataex.pending_pak);

        write_unlock_irqrestore(&dataex.pak_lock, flags);

        DPRINTK("Returning result from read to app: %d\n", ret_size);
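
/*
 * Bookkeeping note for the read path above: dataex.copied_so_far tracks how
 * much of the current packet (4-byte instance prefix plus data_len payload
 * bytes) has been handed to the reader across possibly multiple read()
 * calls.  Only once everything has been consumed does the packet migrate
 * from pending_pak to current_pak, where it waits for the matching write()
 * carrying the response.
 */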
/*
 * Write operation - only works after a previous read operation!
 */
static ssize_t vtpm_op_write(struct file *file,
                             const char __user *data, size_t size,
        unsigned int off = 4;
        struct vtpm_resp_hdr vrh;

        /*
         * Minimum required packet size is:
         * 4 bytes for instance number
         * 4 bytes for paramSize
         * 4 bytes for the ordinal
         */
        if (size < sizeof (vrh))

        if (copy_from_user(&vrh, data, sizeof (vrh)))

        /* malformed packet? */
        if ((off + ntohl(vrh.len_no)) != size)

        write_lock_irqsave(&dataex.pak_lock, flags);
        pak = packet_find_instance(&dataex.current_pak,
                                   ntohl(vrh.instance_no));
                write_unlock_irqrestore(&dataex.pak_lock, flags);
                DPRINTK(KERN_ALERT "No associated packet! (inst=%d)\n",
                        ntohl(vrh.instance_no));

        del_singleshot_timer_sync(&pak->processing_timer);
        list_del(&pak->next);

        write_unlock_irqrestore(&dataex.pak_lock, flags);

        /*
         * The first 'off' bytes must be the instance number - skip them.
         */
        rc = packet_write(pak, &data[off], size, 1);

                /* add back the first 4 bytes (the instance number) that were skipped */
static int vtpm_op_release(struct inode *inode, struct file *file)
        vtpm_release_packets(NULL, 1);
        write_lock_irqsave(&dataex.pak_lock, flags);
        dataex.has_opener = 0;
        write_unlock_irqrestore(&dataex.pak_lock, flags);

static unsigned int vtpm_op_poll(struct file *file,
                                 struct poll_table_struct *pts)
        unsigned int flags = POLLOUT | POLLWRNORM;

        poll_wait(file, &dataex.wait_queue, pts);
        if (!list_empty(&dataex.pending_pak)) {
                flags |= POLLIN | POLLRDNORM;

static struct file_operations vtpm_ops = {
        .owner = THIS_MODULE,
        .open = vtpm_op_open,
        .read = vtpm_op_read,
        .write = vtpm_op_write,
        .release = vtpm_op_release,
        .poll = vtpm_op_poll,

static struct miscdevice vtpms_miscdevice = {
/***************************************************************
***************************************************************/

static int tpm_send_fail_message(struct packet *pak, u8 req_tag)
        static const unsigned char tpm_error_message_fail[] = {
                0x00, 0x00, 0x00, 0x0a,
                0x00, 0x00, 0x00, 0x09  /* TPM_FAIL */
        unsigned char buffer[sizeof (tpm_error_message_fail)];

        memcpy(buffer, tpm_error_message_fail,
               sizeof (tpm_error_message_fail));
        /*
         * Insert the right response tag depending on the given tag
         * All response tags are '+3' to the request tag.
         */
        buffer[1] = req_tag + 3;
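        /*
         * For reference (TPM 1.2 tag values): a TPM_TAG_RQU_COMMAND request
         * (0x00c1) gets a TPM_TAG_RSP_COMMAND reply (0x00c4), and the
         * auth1/auth2 variants follow the same '+3' pattern, which is why
         * patching the low byte of the tag above is sufficient.
         */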
        /*
         * Write the data to shared memory and notify the front-end
         */
        rc = packet_write(pak, buffer, sizeof (buffer), 0);

static int _vtpm_release_packets(struct list_head *head,
                                 tpmif_t * tpmif, int send_msgs)
        struct list_head *pos, *tmp;

        list_for_each_safe(pos, tmp, head) {
                pak = list_entry(pos, struct packet, next);
                if (tpmif == NULL || pak->tpmif == tpmif) {
                        del_singleshot_timer_sync(&pak->processing_timer);
                        list_del(&pak->next);
                        if (pak->tpmif && pak->tpmif->status == CONNECTED) {
                        if (send_msgs && can_send) {
                                tpm_send_fail_message(pak, pak->req_tag);

int vtpm_release_packets(tpmif_t * tpmif, int send_msgs)
        write_lock_irqsave(&dataex.pak_lock, flags);

        dataex.aborted = _vtpm_release_packets(&dataex.pending_pak,
        _vtpm_release_packets(&dataex.current_pak, tpmif, send_msgs);

        write_unlock_irqrestore(&dataex.pak_lock, flags);
static int vtpm_queue_packet(struct packet *pak)
        if (dataex.has_opener) {
                write_lock_irqsave(&dataex.pak_lock, flags);
                list_add_tail(&pak->next, &dataex.pending_pak);
                /* give the TPM some time to pick up the request */
                mod_timer(&pak->processing_timer, jiffies + (30 * HZ));
                write_unlock_irqrestore(&dataex.pak_lock, flags);

                wake_up_interruptible(&dataex.wait_queue);
static int vtpm_receive(tpmif_t * tpmif, u32 size)
        unsigned char buffer[10];

        struct packet *pak = packet_alloc(tpmif, size, 0, 0);
        /*
         * Read 10 bytes from the received buffer to test its
         * content for validity.
         */
        if (sizeof (buffer) != packet_read(pak,
                                           sizeof (buffer), buffer,
                                           sizeof (buffer), 0)) {
        /*
         * Reset the packet read pointer so we can read all its
         * contents again.
         */
        native_size = (__force __be32 *) (&buffer[4 + 2]);
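        /*
         * Layout of 'buffer' at this point: bytes 0-3 hold the instance
         * number prefix added by packet_read(), bytes 4-5 the TPM command
         * tag, and bytes 6-9 the big-endian paramSize, which is what
         * native_size points at and what gets compared against the ring's
         * idea of the request size below.
         */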
        /*
         * Verify that the size of the packet is correct
         * as indicated and that there's actually someone reading packets.
         * The minimum size of the packet is '10' for tag, size indicator
         * and ordinal.
         */
            be32_to_cpu(*native_size) != size ||
            0 == dataex.has_opener || tpmif->status != CONNECTED) {

        rc = vtpm_queue_packet(pak);

                tpm_send_fail_message(pak, buffer[4 + 1]);
/*
 * Timeout function that gets invoked when a packet has not been processed
 * during the timeout period.
 * The packet must be on a list when this function is invoked. This
 * also means that once it is taken off a list, the timer must be
 * canceled as well.
 */
static void processing_timeout(unsigned long ptr)
        struct packet *pak = (struct packet *)ptr;

        write_lock_irqsave(&dataex.pak_lock, flags);
        /*
         * Check whether the packet is still on one of the lists.
         */
        if (pak == packet_find_packet(&dataex.pending_pak, pak) ||
            pak == packet_find_packet(&dataex.current_pak, pak)) {
                if ((pak->flags & PACKET_FLAG_DISCARD_RESPONSE) == 0) {
                        tpm_send_fail_message(pak, pak->req_tag);
                /* discard future responses */
                pak->flags |= PACKET_FLAG_DISCARD_RESPONSE;

        write_unlock_irqrestore(&dataex.pak_lock, flags);
static void tpm_tx_action(unsigned long unused);
static DECLARE_TASKLET(tpm_tx_tasklet, tpm_tx_action, 0);

static struct list_head tpm_schedule_list;
static spinlock_t tpm_schedule_list_lock;
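
/*
 * As in netback, from which this driver is derived, the interrupt handler
 * does no real work itself: tpmif_be_int() merely puts the interface on
 * tpm_schedule_list and schedules tpm_tx_tasklet, and tpm_tx_action() later
 * drains the list in softirq context, pulling one request per interface off
 * its ring via vtpm_receive().
 */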
static inline void maybe_schedule_tx_action(void)
        tasklet_schedule(&tpm_tx_tasklet);

static inline int __on_tpm_schedule_list(tpmif_t * tpmif)
        return tpmif->list.next != NULL;

static void remove_from_tpm_schedule_list(tpmif_t * tpmif)
        spin_lock_irq(&tpm_schedule_list_lock);
        if (likely(__on_tpm_schedule_list(tpmif))) {
                list_del(&tpmif->list);
                tpmif->list.next = NULL;
        spin_unlock_irq(&tpm_schedule_list_lock);
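
/*
 * Note on the marker used above: an interface counts as "not on the schedule
 * list" when tpmif->list.next is NULL, which is why
 * remove_from_tpm_schedule_list() clears the pointer after list_del() and
 * why add_to_tpm_schedule_list_tail() can do a cheap unlocked
 * __on_tpm_schedule_list() check before taking the lock.
 */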
static void add_to_tpm_schedule_list_tail(tpmif_t * tpmif)
        if (__on_tpm_schedule_list(tpmif))

        spin_lock_irq(&tpm_schedule_list_lock);
        if (!__on_tpm_schedule_list(tpmif) && tpmif->active) {
                list_add_tail(&tpmif->list, &tpm_schedule_list);
        spin_unlock_irq(&tpm_schedule_list_lock);

void tpmif_schedule_work(tpmif_t * tpmif)
        add_to_tpm_schedule_list_tail(tpmif);
        maybe_schedule_tx_action();

void tpmif_deschedule_work(tpmif_t * tpmif)
        remove_from_tpm_schedule_list(tpmif);
static void tpm_tx_action(unsigned long unused)
        struct list_head *ent;
        tpmif_tx_request_t *tx;

        DPRINTK("%s: Getting data from front-end(s)!\n", __FUNCTION__);

        while (!list_empty(&tpm_schedule_list)) {
                /* Get a tpmif from the list with work to do. */
                ent = tpm_schedule_list.next;
                tpmif = list_entry(ent, tpmif_t, list);

                remove_from_tpm_schedule_list(tpmif);

                tx = &tpmif->tx->ring[0].req;

                vtpm_receive(tpmif, tx->size);
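
/*
 * Only ring slot 0 is consulted here: in this protocol its 'size' field
 * carries the total length of the request, while the payload itself is
 * spread across the granted pages of the individual ring slots and is
 * gathered later by packet_read_shmem().
 */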
irqreturn_t tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs)
        tpmif_t *tpmif = (tpmif_t *) dev_id;

        add_to_tpm_schedule_list_tail(tpmif);
        maybe_schedule_tx_action();

static int __init tpmback_init(void)
        if ((rc = misc_register(&vtpms_miscdevice)) != 0) {
                       "Could not register misc device for TPM BE.\n");

        dataex_init(&dataex);

        spin_lock_init(&tpm_schedule_list_lock);
        INIT_LIST_HEAD(&tpm_schedule_list);

        tpmif_interface_init();

        printk(KERN_ALERT "Successfully initialized TPM backend driver.\n");

module_init(tpmback_init);

void __exit tpmback_exit(void)
        vtpm_release_packets(NULL, 0);
        tpmif_interface_exit();
        misc_deregister(&vtpms_miscdevice);

MODULE_LICENSE("Dual BSD/GPL");