2 * linux/drivers/s390/misc/z90main.c
6 * Copyright (C) 2001, 2004 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com)
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 #include <asm/uaccess.h> // copy_(from|to)_user
28 #include <linux/compat.h>
29 #include <linux/compiler.h>
30 #include <linux/delay.h> // mdelay
31 #include <linux/init.h>
32 #include <linux/interrupt.h> // for tasklets
33 #include <linux/ioctl32.h>
34 #include <linux/module.h>
35 #include <linux/moduleparam.h>
36 #include <linux/proc_fs.h>
37 #include <linux/syscalls.h>
38 #include <linux/version.h>
40 #include "z90common.h"
41 #ifndef Z90CRYPT_USE_HOTPLUG
42 #include <linux/miscdevice.h>
45 #define VERSION_CODE(vers, rel, seq) (((vers)<<16) | ((rel)<<8) | (seq))
46 #if LINUX_VERSION_CODE < VERSION_CODE(2,4,0) /* version < 2.4 */
47 # error "This kernel is too old: not supported"
49 #if LINUX_VERSION_CODE > VERSION_CODE(2,7,0) /* version > 2.6 */
50 # error "This kernel is too recent: not supported by this file"
53 #define VERSION_Z90MAIN_C "$Revision: 1.31 $"
55 static char z90cmain_version[] __initdata =
56 "z90main.o (" VERSION_Z90MAIN_C "/"
57 VERSION_Z90COMMON_H "/" VERSION_Z90CRYPT_H ")";
59 extern char z90chardware_version[];
62 * Defaults that may be modified.
65 #ifndef Z90CRYPT_USE_HOTPLUG
67 * You can specify a different minor at compile time.
69 #ifndef Z90CRYPT_MINOR
70 #define Z90CRYPT_MINOR MISC_DYNAMIC_MINOR
74 * You can specify a different major at compile time.
76 #ifndef Z90CRYPT_MAJOR
77 #define Z90CRYPT_MAJOR 0
82 * You can specify a different domain at compile time or on the insmod
86 #define DOMAIN_INDEX -1
90 * This is the name under which the device is registered in /proc/modules.
92 #define REG_NAME "z90crypt"
95 * Cleanup should run every CLEANUPTIME seconds and should clean up requests
96 * older than CLEANUPTIME seconds in the past.
99 #define CLEANUPTIME 15
103 * Config should run every CONFIGTIME seconds
106 #define CONFIGTIME 30
110 * The first execution of the config task should take place
111 * immediately after initialization
113 #ifndef INITIAL_CONFIGTIME
114 #define INITIAL_CONFIGTIME 1
118 * Reader should run every READERTIME milliseconds
125 * turn long device array index into device pointer
127 #define LONG2DEVPTR(ndx) (z90crypt.device_p[(ndx)])
130 * turn short device array index into long device array index
132 #define SHRT2LONG(ndx) (z90crypt.overall_device_x.device_index[(ndx)])
135 * turn short device array index into device pointer
137 #define SHRT2DEVPTR(ndx) LONG2DEVPTR(SHRT2LONG(ndx))
/*
 * Status byte for a work-element (kept in work_element.status[0]).
 */
#define STAT_DEFAULT	0x00	// request has not been processed

#define STAT_ROUTED	0x80	// bit 7: requests get routed to specific device
				//	  else, device is determined each write
#define STAT_FAILED	0x40	// bit 6: this bit is set if the request failed
				//	  before being sent to the hardware.
#define STAT_WRITTEN	0x30	// bits 5-4: work to be done, not sent to device
//			0x20	// UNUSED state
#define STAT_READPEND	0x10	// bits 5-4: work done, we're returning data now
#define STAT_NOWORK	0x00	// bits off: no work on any queue

#define STAT_RDWRMASK	0x30	// mask for bits 5-4

/*
 * Macros to check/update the STAT_RDWRMASK bits of a status byte.
 *
 * SET_RDWRMASK is wrapped in do { } while (0) so the two statements expand
 * as a single statement (safe inside an unbraced if/else), and newval is
 * parenthesized so an expression argument cannot change precedence inside
 * the expansion.
 */
#define CHK_RDWRMASK(statbyte) ((statbyte) & STAT_RDWRMASK)
#define SET_RDWRMASK(statbyte, newval) \
	do { (statbyte) &= ~STAT_RDWRMASK; (statbyte) |= (newval); } while (0)
162  * Audit Trail.  Progress of a Work element
163  * audit[0]: Unless noted otherwise, these bits are all set by the process
// NOTE(review): these FP_* flags are recorded in the 3-byte
// work_element.audit[] array (see struct work_element below); audit[0] is
// written by the submitting process, audit[1] by the reader/cleanup tasks.
165 #define FP_COPYFROM 0x80 // Caller's buffer has been copied to work element
166 #define FP_BUFFREQ 0x40 // Low Level buffer requested
167 #define FP_BUFFGOT 0x20 // Low Level buffer obtained
168 #define FP_SENT 0x10 // Work element sent to a crypto device
169 // (may be set by process or by reader task)
170 #define FP_PENDING 0x08 // Work element placed on pending queue
171 // (may be set by process or by reader task)
172 #define FP_REQUEST 0x04 // Work element placed on request queue
173 #define FP_ASLEEP 0x02 // Work element about to sleep
174 #define FP_AWAKE 0x01 // Work element has been awakened
177  * audit[1]: These bits are set by the reader task and/or the cleanup task
179 #define FP_NOTPENDING 0x80 // Work element removed from pending queue
180 #define FP_AWAKENING 0x40 // Caller about to be awakened
181 #define FP_TIMEDOUT 0x20 // Caller timed out
182 #define FP_RESPSIZESET 0x10 // Response size copied to work element
183 #define FP_RESPADDRCOPIED 0x08 // Response address copied to work element
184 #define FP_RESPBUFFCOPIED 0x04 // Response buffer copied to work element
185 #define FP_REMREQUEST 0x02 // Work element removed from request queue
186 #define FP_SIGNALED 0x01 // Work element was awakened by a signal
193 * state of the file handle in private_data.status
196 #define STAT_CLOSED 1
199 * PID() expands to the process ID of the current process
201 #define PID() (current->pid)
204 * Selected Constants. The number of APs and the number of devices
206 #ifndef Z90CRYPT_NUM_APS
207 #define Z90CRYPT_NUM_APS 64
209 #ifndef Z90CRYPT_NUM_DEVS
210 #define Z90CRYPT_NUM_DEVS Z90CRYPT_NUM_APS
212 #ifndef Z90CRYPT_NUM_TYPES
213 #define Z90CRYPT_NUM_TYPES 3
217 * Buffer size for receiving responses. The maximum Response Size
218 * is actually the maximum request size, since in an error condition
219 * the request itself may be returned unchanged.
221 #ifndef MAX_RESPONSE_SIZE
222 #define MAX_RESPONSE_SIZE 0x0000077C
226 * A count and status-byte mask
229 int st_count; // # of enabled devices
230 int disabled_count; // # of disabled devices
231 int user_disabled_count; // # of devices disabled via proc fs
232 unsigned char st_mask[Z90CRYPT_NUM_APS]; // current status mask
236 * The array of device indexes is a mechanism for fast indexing into
237 * a long (and sparse) array. For instance, if APs 3, 9 and 47 are
238 * installed, z90CDeviceIndex[0] is 3, z90CDeviceIndex[1] is 9, and
239 * z90CDeviceIndex[2] is 47.
242 int device_index[Z90CRYPT_NUM_DEVS];
246 * All devices are arranged in a single array: 64 APs
249 int dev_type; // PCICA, PCICC, or PCIXCC
250 enum devstat dev_stat; // current device status
251 int dev_self_x; // Index in array
252 int disabled; // Set when device is in error
253 int user_disabled; // Set when device is disabled by user
254 int dev_q_depth; // q depth
255 unsigned char * dev_resp_p; // Response buffer address
256 int dev_resp_l; // Response Buffer length
257 int dev_caller_count; // Number of callers
258 int dev_total_req_cnt; // # requests for device since load
259 struct list_head dev_caller_list; // List of callers
263 * There's a struct status and a struct device_x for each device type.
265 struct hdware_block {
266 struct status hdware_mask;
267 struct status type_mask[Z90CRYPT_NUM_TYPES];
268 struct device_x type_x_addr[Z90CRYPT_NUM_TYPES];
269 unsigned char device_type_array[Z90CRYPT_NUM_APS];
273 * z90crypt is the topmost data structure in the hierarchy.
276 int max_count; // Nr of possible crypto devices
278 int q_depth_array[Z90CRYPT_NUM_DEVS];
279 int dev_type_array[Z90CRYPT_NUM_DEVS];
280 struct device_x overall_device_x; // array device indexes
281 struct device * device_p[Z90CRYPT_NUM_DEVS];
283 int domain_established;// TRUE: domain has been found
284 int cdx; // Crypto Domain Index
285 int len; // Length of this data structure
286 struct hdware_block *hdware_info;
290 * An array of these structures is pointed to from dev_caller
291 * The length of the array depends on the device type. For APs,
294 * The caller buffer is allocated to the user at OPEN. At WRITE,
295 * it contains the request; at READ, the response. The function
296 * send_to_crypto_device converts the request to device-dependent
297 * form and use the caller's OPEN-allocated buffer for the response.
300 int caller_buf_l; // length of original request
301 unsigned char * caller_buf_p; // Original request on WRITE
302 int caller_dev_dep_req_l; // len device dependent request
303 unsigned char * caller_dev_dep_req_p; // Device dependent form
304 unsigned char caller_id[8]; // caller-supplied message id
305 struct list_head caller_liste;
306 unsigned char caller_dev_dep_req[MAX_RESPONSE_SIZE];
310 * Function prototypes from z90hardware.c
312 enum hdstat query_online(int, int, int, int *, int *);
313 enum devstat reset_device(int, int, int);
314 enum devstat send_to_AP(int, int, int, unsigned char *);
315 enum devstat receive_from_AP(int, int, int, unsigned char *, unsigned char *);
316 int convert_request(unsigned char *, int, short, int, int, int *,
318 int convert_response(unsigned char *, unsigned char *, int *, unsigned char *);
321 * Low level function prototypes
323 static int create_z90crypt(int *);
324 static int refresh_z90crypt(int *);
325 static int find_crypto_devices(struct status *);
326 static int create_crypto_device(int);
327 static int destroy_crypto_device(int);
328 static void destroy_z90crypt(void);
329 static int refresh_index_array(struct status *, struct device_x *);
330 static int probe_device_type(struct device *);
333 * proc fs definitions
335 static struct proc_dir_entry *z90crypt_entry;
342 * work_element.opener points back to this structure
346 unsigned char status; // 0: open 1: closed
350 * A work element is allocated for each request
352 struct work_element {
353 struct priv_data *priv_data;
355 int devindex; // index of device processing this w_e
356 // (If request did not specify device,
357 // -1 until placed onto a queue)
359 struct list_head liste; // used for requestq and pendingq
360 char buffer[128]; // local copy of user request
361 int buff_size; // size of the buffer for the request
362 char resp_buff[RESPBUFFSIZE];
364 char __user * resp_addr; // address of response in user space
365 unsigned int funccode; // function code of request
366 wait_queue_head_t waitq;
367 unsigned long requestsent; // time at which the request was sent
368 atomic_t alarmrung; // wake-up signal
369 unsigned char caller_id[8]; // pid + counter, for this w_e
370 unsigned char status[1]; // bits to mark status of the request
371 unsigned char audit[3]; // record of work element's progress
372 unsigned char * requestptr; // address of request buffer
373 int retcode; // return code of request
377 * High level function prototypes
379 static int z90crypt_open(struct inode *, struct file *);
380 static int z90crypt_release(struct inode *, struct file *);
381 static ssize_t z90crypt_read(struct file *, char __user *, size_t, loff_t *);
382 static ssize_t z90crypt_write(struct file *, const char __user *,
384 static int z90crypt_ioctl(struct inode *, struct file *,
385 unsigned int, unsigned long);
387 static void z90crypt_reader_task(unsigned long);
388 static void z90crypt_schedule_reader_task(unsigned long);
389 static void z90crypt_config_task(unsigned long);
390 static void z90crypt_cleanup_task(unsigned long);
392 static int z90crypt_status(char *, char **, off_t, int, int *, void *);
393 static int z90crypt_status_write(struct file *, const char __user *,
394 unsigned long, void *);
400 #ifdef Z90CRYPT_USE_HOTPLUG
401 #define Z90CRYPT_HOTPLUG_ADD 1
402 #define Z90CRYPT_HOTPLUG_REMOVE 2
404 static void z90crypt_hotplug_event(int, int, int);
408 * Storage allocated at initialization and used throughout the life of
411 #ifdef Z90CRYPT_USE_HOTPLUG
412 static int z90crypt_major = Z90CRYPT_MAJOR;
415 static int domain = DOMAIN_INDEX;
416 static struct z90crypt z90crypt;
417 static int quiesce_z90crypt;
418 static spinlock_t queuespinlock;
419 static struct list_head request_list;
420 static int requestq_count;
421 static struct list_head pending_list;
422 static int pendingq_count;
424 static struct tasklet_struct reader_tasklet;
425 static struct timer_list reader_timer;
426 static struct timer_list config_timer;
427 static struct timer_list cleanup_timer;
428 static atomic_t total_open;
429 static atomic_t z90crypt_step;
431 static struct file_operations z90crypt_fops = {
432 .owner = THIS_MODULE,
433 .read = z90crypt_read,
434 .write = z90crypt_write,
435 .ioctl = z90crypt_ioctl,
436 .open = z90crypt_open,
437 .release = z90crypt_release
440 #ifndef Z90CRYPT_USE_HOTPLUG
441 static struct miscdevice z90crypt_misc_device = {
442 .minor = Z90CRYPT_MINOR,
444 .fops = &z90crypt_fops,
445 .devfs_name = DEV_NAME
450 * Documentation values.
452 MODULE_AUTHOR("zLinux Crypto Team: Robert H. Burroughs, Eric D. Rossman"
453 "and Jochen Roehrig");
454 MODULE_DESCRIPTION("zLinux Cryptographic Coprocessor device driver, "
455 "Copyright 2001, 2004 IBM Corporation");
456 MODULE_LICENSE("GPL");
457 module_param(domain, int, 0);
458 MODULE_PARM_DESC(domain, "domain index for device");
462 * ioctl32 conversion routines
464 struct ica_rsa_modexpo_32 { // For 32-bit callers
465 compat_uptr_t inputdata;
466 unsigned int inputdatalength;
467 compat_uptr_t outputdata;
468 unsigned int outputdatalength;
470 compat_uptr_t n_modulus;
474 trans_modexpo32(unsigned int fd, unsigned int cmd, unsigned long arg,
477 struct ica_rsa_modexpo_32 __user *mex32u = compat_ptr(arg);
478 struct ica_rsa_modexpo_32 mex32k;
479 struct ica_rsa_modexpo __user *mex64;
483 if (!access_ok(VERIFY_WRITE, mex32u, sizeof(struct ica_rsa_modexpo_32)))
485 mex64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo));
486 if (!access_ok(VERIFY_WRITE, mex64, sizeof(struct ica_rsa_modexpo)))
488 if (copy_from_user(&mex32k, mex32u, sizeof(struct ica_rsa_modexpo_32)))
490 if (__put_user(compat_ptr(mex32k.inputdata), &mex64->inputdata) ||
491 __put_user(mex32k.inputdatalength, &mex64->inputdatalength) ||
492 __put_user(compat_ptr(mex32k.outputdata), &mex64->outputdata) ||
493 __put_user(mex32k.outputdatalength, &mex64->outputdatalength) ||
494 __put_user(compat_ptr(mex32k.b_key), &mex64->b_key) ||
495 __put_user(compat_ptr(mex32k.n_modulus), &mex64->n_modulus))
497 ret = sys_ioctl(fd, cmd, (unsigned long)mex64);
499 if (__get_user(i, &mex64->outputdatalength) ||
500 __put_user(i, &mex32u->outputdatalength))
505 struct ica_rsa_modexpo_crt_32 { // For 32-bit callers
506 compat_uptr_t inputdata;
507 unsigned int inputdatalength;
508 compat_uptr_t outputdata;
509 unsigned int outputdatalength;
510 compat_uptr_t bp_key;
511 compat_uptr_t bq_key;
512 compat_uptr_t np_prime;
513 compat_uptr_t nq_prime;
514 compat_uptr_t u_mult_inv;
518 trans_modexpo_crt32(unsigned int fd, unsigned int cmd, unsigned long arg,
521 struct ica_rsa_modexpo_crt_32 __user *crt32u = compat_ptr(arg);
522 struct ica_rsa_modexpo_crt_32 crt32k;
523 struct ica_rsa_modexpo_crt __user *crt64;
527 if (!access_ok(VERIFY_WRITE, crt32u,
528 sizeof(struct ica_rsa_modexpo_crt_32)))
530 crt64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo_crt));
531 if (!access_ok(VERIFY_WRITE, crt64, sizeof(struct ica_rsa_modexpo_crt)))
533 if (copy_from_user(&crt32k, crt32u,
534 sizeof(struct ica_rsa_modexpo_crt_32)))
536 if (__put_user(compat_ptr(crt32k.inputdata), &crt64->inputdata) ||
537 __put_user(crt32k.inputdatalength, &crt64->inputdatalength) ||
538 __put_user(compat_ptr(crt32k.outputdata), &crt64->outputdata) ||
539 __put_user(crt32k.outputdatalength, &crt64->outputdatalength) ||
540 __put_user(compat_ptr(crt32k.bp_key), &crt64->bp_key) ||
541 __put_user(compat_ptr(crt32k.bq_key), &crt64->bq_key) ||
542 __put_user(compat_ptr(crt32k.np_prime), &crt64->np_prime) ||
543 __put_user(compat_ptr(crt32k.nq_prime), &crt64->nq_prime) ||
544 __put_user(compat_ptr(crt32k.u_mult_inv), &crt64->u_mult_inv))
547 ret = sys_ioctl(fd, cmd, (unsigned long)crt64);
549 if (__get_user(i, &crt64->outputdatalength) ||
550 __put_user(i, &crt32u->outputdatalength))
555 static int compatible_ioctls[] = {
556 ICAZ90STATUS, Z90QUIESCE, Z90STAT_TOTALCOUNT, Z90STAT_PCICACOUNT,
557 Z90STAT_PCICCCOUNT, Z90STAT_PCIXCCCOUNT, Z90STAT_REQUESTQ_COUNT,
558 Z90STAT_PENDINGQ_COUNT, Z90STAT_TOTALOPEN_COUNT, Z90STAT_DOMAIN_INDEX,
559 Z90STAT_STATUS_MASK, Z90STAT_QDEPTH_MASK, Z90STAT_PERDEV_REQCNT,
562 static void z90_unregister_ioctl32s(void)
566 unregister_ioctl32_conversion(ICARSAMODEXPO);
567 unregister_ioctl32_conversion(ICARSACRT);
569 for(i = 0; i < ARRAY_SIZE(compatible_ioctls); i++)
570 unregister_ioctl32_conversion(compatible_ioctls[i]);
573 static int z90_register_ioctl32s(void)
577 result = register_ioctl32_conversion(ICARSAMODEXPO, trans_modexpo32);
580 result = register_ioctl32_conversion(ICARSACRT, trans_modexpo_crt32);
584 for(i = 0; i < ARRAY_SIZE(compatible_ioctls); i++) {
585 result = register_ioctl32_conversion(compatible_ioctls[i],NULL);
587 z90_unregister_ioctl32s();
593 #else // !CONFIG_COMPAT
594 static inline void z90_unregister_ioctl32s(void)
598 static inline int z90_register_ioctl32s(void)
605 * The module initialization code.
608 z90crypt_init_module(void)
611 struct proc_dir_entry *entry;
613 PDEBUG("PID %d\n", PID());
615 #ifndef Z90CRYPT_USE_HOTPLUG
616 /* Register as misc device with given minor (or get a dynamic one). */
617 result = misc_register(&z90crypt_misc_device);
619 PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n",
620 z90crypt_misc_device.minor, result);
624 /* Register the major (or get a dynamic one). */
625 result = register_chrdev(z90crypt_major, REG_NAME, &z90crypt_fops);
627 PRINTKW("register_chrdev (major %d) failed with %d.\n",
628 z90crypt_major, result);
632 if (z90crypt_major == 0)
633 z90crypt_major = result;
636 PDEBUG("Registered " DEV_NAME " with result %d\n", result);
638 result = create_z90crypt(&domain);
640 PRINTKW("create_z90crypt (domain index %d) failed with %d.\n",
643 goto init_module_cleanup;
647 PRINTKN("Version %d.%d.%d loaded, built on %s %s\n",
648 z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT,
650 PRINTKN("%s\n", z90cmain_version);
651 PRINTKN("%s\n", z90chardware_version);
652 PDEBUG("create_z90crypt (domain index %d) successful.\n",
655 PRINTK("No devices at startup\n");
657 #ifdef Z90CRYPT_USE_HOTPLUG
658 /* generate hotplug event for device node generation */
659 z90crypt_hotplug_event(z90crypt_major, 0, Z90CRYPT_HOTPLUG_ADD);
662 /* Initialize globals. */
663 spin_lock_init(&queuespinlock);
665 INIT_LIST_HEAD(&pending_list);
668 INIT_LIST_HEAD(&request_list);
671 quiesce_z90crypt = 0;
673 atomic_set(&total_open, 0);
674 atomic_set(&z90crypt_step, 0);
676 /* Set up the cleanup task. */
677 init_timer(&cleanup_timer);
678 cleanup_timer.function = z90crypt_cleanup_task;
679 cleanup_timer.data = 0;
680 cleanup_timer.expires = jiffies + (CLEANUPTIME * HZ);
681 add_timer(&cleanup_timer);
683 /* Set up the proc file system */
684 entry = create_proc_entry("driver/z90crypt", 0644, 0);
688 entry->read_proc = z90crypt_status;
689 entry->write_proc = z90crypt_status_write;
692 PRINTK("Couldn't create z90crypt proc entry\n");
693 z90crypt_entry = entry;
695 /* Set up the configuration task. */
696 init_timer(&config_timer);
697 config_timer.function = z90crypt_config_task;
698 config_timer.data = 0;
699 config_timer.expires = jiffies + (INITIAL_CONFIGTIME * HZ);
700 add_timer(&config_timer);
702 /* Set up the reader task */
703 tasklet_init(&reader_tasklet, z90crypt_reader_task, 0);
704 init_timer(&reader_timer);
705 reader_timer.function = z90crypt_schedule_reader_task;
706 reader_timer.data = 0;
707 reader_timer.expires = jiffies + (READERTIME * HZ / 1000);
708 add_timer(&reader_timer);
710 if ((result = z90_register_ioctl32s()))
711 goto init_module_cleanup;
716 z90_unregister_ioctl32s();
718 #ifndef Z90CRYPT_USE_HOTPLUG
719 if ((nresult = misc_deregister(&z90crypt_misc_device)))
720 PRINTK("misc_deregister failed with %d.\n", nresult);
722 PDEBUG("misc_deregister successful.\n");
724 if ((nresult = unregister_chrdev(z90crypt_major, REG_NAME)))
725 PRINTK("unregister_chrdev failed with %d.\n", nresult);
727 PDEBUG("unregister_chrdev successful.\n");
730 return result; // failure
734 * The module termination code
737 z90crypt_cleanup_module(void)
741 PDEBUG("PID %d\n", PID());
743 z90_unregister_ioctl32s();
745 remove_proc_entry("driver/z90crypt", 0);
747 #ifndef Z90CRYPT_USE_HOTPLUG
748 if ((nresult = misc_deregister(&z90crypt_misc_device)))
749 PRINTK("misc_deregister failed with %d.\n", nresult);
751 PDEBUG("misc_deregister successful.\n");
753 z90crypt_hotplug_event(z90crypt_major, 0, Z90CRYPT_HOTPLUG_REMOVE);
755 if ((nresult = unregister_chrdev(z90crypt_major, REG_NAME)))
756 PRINTK("unregister_chrdev failed with %d.\n", nresult);
758 PDEBUG("unregister_chrdev successful.\n");
761 /* Remove the tasks */
762 tasklet_kill(&reader_tasklet);
763 del_timer(&reader_timer);
764 del_timer(&config_timer);
765 del_timer(&cleanup_timer);
769 PRINTKN("Unloaded.\n");
773 * Functions running under a process id
782 * z90crypt_status_write
792 * z90crypt_process_results
796 z90crypt_open(struct inode *inode, struct file *filp)
798 struct priv_data *private_data_p;
800 if (quiesce_z90crypt)
803 private_data_p = kmalloc(sizeof(struct priv_data), GFP_KERNEL);
804 if (!private_data_p) {
805 PRINTK("Memory allocate failed\n");
809 memset((void *)private_data_p, 0, sizeof(struct priv_data));
810 private_data_p->status = STAT_OPEN;
811 private_data_p->opener_pid = PID();
812 filp->private_data = private_data_p;
813 atomic_inc(&total_open);
819 z90crypt_release(struct inode *inode, struct file *filp)
821 struct priv_data *private_data_p = filp->private_data;
823 PDEBUG("PID %d (filp %p)\n", PID(), filp);
825 private_data_p->status = STAT_CLOSED;
826 memset(private_data_p, 0, sizeof(struct priv_data));
827 kfree(private_data_p);
828 atomic_dec(&total_open);
834 * there are two read functions, of which compile options will choose one
835 * without USE_GET_RANDOM_BYTES
836 * => read() always returns -EPERM;
838 * => read() uses get_random_bytes() kernel function
840 #ifndef USE_GET_RANDOM_BYTES
842 * z90crypt_read will not be supported beyond z90crypt 1.3.1
845 z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
847 PDEBUG("filp %p (PID %d)\n", filp, PID());
850 #else // we want to use get_random_bytes
852 * read() just returns a string of random bytes. Since we have no way
853 * to generate these cryptographically, we just execute get_random_bytes
854 * for the length specified.
856 #include <linux/random.h>
858 z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
860 unsigned char *temp_buff;
862 PDEBUG("filp %p (PID %d)\n", filp, PID());
864 if (quiesce_z90crypt)
867 PRINTK("Requested random byte count negative: %ld\n", count);
870 if (count > RESPBUFFSIZE) {
871 PDEBUG("count[%d] > RESPBUFFSIZE", count);
876 temp_buff = kmalloc(RESPBUFFSIZE, GFP_KERNEL);
878 PRINTK("Memory allocate failed\n");
881 get_random_bytes(temp_buff, count);
883 if (copy_to_user(buf, temp_buff, count) != 0) {
893 * Write is is not allowed
896 z90crypt_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
898 PDEBUG("filp %p (PID %d)\n", filp, PID());
903 * New status functions
906 get_status_totalcount(void)
908 return z90crypt.hdware_info->hdware_mask.st_count;
912 get_status_PCICAcount(void)
914 return z90crypt.hdware_info->type_mask[PCICA].st_count;
918 get_status_PCICCcount(void)
920 return z90crypt.hdware_info->type_mask[PCICC].st_count;
924 get_status_PCIXCCcount(void)
926 return z90crypt.hdware_info->type_mask[PCIXCC].st_count;
930 get_status_requestq_count(void)
932 return requestq_count;
936 get_status_pendingq_count(void)
938 return pendingq_count;
942 get_status_totalopen_count(void)
944 return atomic_read(&total_open);
948 get_status_domain_index(void)
953 static inline unsigned char *
954 get_status_status_mask(unsigned char status[Z90CRYPT_NUM_APS])
958 memcpy(status, z90crypt.hdware_info->device_type_array,
961 for (i = 0; i < get_status_totalcount(); i++) {
963 if (LONG2DEVPTR(ix)->user_disabled)
970 static inline unsigned char *
971 get_status_qdepth_mask(unsigned char qdepth[Z90CRYPT_NUM_APS])
975 memset(qdepth, 0, Z90CRYPT_NUM_APS);
977 for (i = 0; i < get_status_totalcount(); i++) {
979 qdepth[ix] = LONG2DEVPTR(ix)->dev_caller_count;
985 static inline unsigned int *
986 get_status_perdevice_reqcnt(unsigned int reqcnt[Z90CRYPT_NUM_APS])
990 memset(reqcnt, 0, Z90CRYPT_NUM_APS * sizeof(int));
992 for (i = 0; i < get_status_totalcount(); i++) {
994 reqcnt[ix] = LONG2DEVPTR(ix)->dev_total_req_cnt;
1001 init_work_element(struct work_element *we_p,
1002 struct priv_data *priv_data, pid_t pid)
1006 we_p->requestptr = (unsigned char *)we_p + sizeof(struct work_element);
1007 /* Come up with a unique id for this caller. */
1008 step = atomic_inc_return(&z90crypt_step);
1009 memcpy(we_p->caller_id+0, (void *) &pid, sizeof(pid));
1010 memcpy(we_p->caller_id+4, (void *) &step, sizeof(step));
1012 we_p->priv_data = priv_data;
1013 we_p->status[0] = STAT_DEFAULT;
1014 we_p->audit[0] = 0x00;
1015 we_p->audit[1] = 0x00;
1016 we_p->audit[2] = 0x00;
1017 we_p->resp_buff_size = 0;
1019 we_p->devindex = -1; // send_to_crypto selects the device
1020 we_p->devtype = -1; // getCryptoBuffer selects the type
1021 atomic_set(&we_p->alarmrung, 0);
1022 init_waitqueue_head(&we_p->waitq);
1023 INIT_LIST_HEAD(&(we_p->liste));
1027 allocate_work_element(struct work_element **we_pp,
1028 struct priv_data *priv_data_p, pid_t pid)
1030 struct work_element *we_p;
1032 we_p = (struct work_element *) get_zeroed_page(GFP_KERNEL);
1035 init_work_element(we_p, priv_data_p, pid);
1041 remove_device(struct device *device_p)
1043 if (!device_p || device_p->disabled != 0)
1045 device_p->disabled = 1;
1046 z90crypt.hdware_info->type_mask[device_p->dev_type].disabled_count++;
1047 z90crypt.hdware_info->hdware_mask.disabled_count++;
1051 select_device_type(int *dev_type_p)
1053 struct status *stat;
1054 if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) &&
1055 (*dev_type_p != PCIXCC) && (*dev_type_p != ANYDEV))
1057 if (*dev_type_p != ANYDEV) {
1058 stat = &z90crypt.hdware_info->type_mask[*dev_type_p];
1059 if (stat->st_count >
1060 stat->disabled_count + stat->user_disabled_count)
1065 stat = &z90crypt.hdware_info->type_mask[PCICA];
1066 if (stat->st_count > stat->disabled_count + stat->user_disabled_count) {
1067 *dev_type_p = PCICA;
1071 stat = &z90crypt.hdware_info->type_mask[PCIXCC];
1072 if (stat->st_count > stat->disabled_count + stat->user_disabled_count) {
1073 *dev_type_p = PCIXCC;
1077 stat = &z90crypt.hdware_info->type_mask[PCICC];
1078 if (stat->st_count > stat->disabled_count + stat->user_disabled_count) {
1079 *dev_type_p = PCICC;
1087 * Try the selected number, then the selected type (can be ANYDEV)
1090 select_device(int *dev_type_p, int *device_nr_p)
1092 int i, indx, devTp, low_count, low_indx;
1093 struct device_x *index_p;
1094 struct device *dev_ptr;
1096 PDEBUG("device type = %d, index = %d\n", *dev_type_p, *device_nr_p);
1097 if ((*device_nr_p >= 0) && (*device_nr_p < Z90CRYPT_NUM_DEVS)) {
1098 PDEBUG("trying index = %d\n", *device_nr_p);
1099 dev_ptr = z90crypt.device_p[*device_nr_p];
1102 dev_ptr->dev_stat != DEV_GONE &&
1103 dev_ptr->disabled == 0 &&
1104 dev_ptr->user_disabled == 0) {
1105 PDEBUG("selected by number, index = %d\n",
1107 *dev_type_p = dev_ptr->dev_type;
1108 return *device_nr_p;
1112 PDEBUG("trying type = %d\n", *dev_type_p);
1113 devTp = *dev_type_p;
1114 if (select_device_type(&devTp) == -1) {
1115 PDEBUG("failed to select by type\n");
1118 PDEBUG("selected type = %d\n", devTp);
1119 index_p = &z90crypt.hdware_info->type_x_addr[devTp];
1120 low_count = 0x0000FFFF;
1122 for (i = 0; i < z90crypt.hdware_info->type_mask[devTp].st_count; i++) {
1123 indx = index_p->device_index[i];
1124 dev_ptr = z90crypt.device_p[indx];
1126 dev_ptr->dev_stat != DEV_GONE &&
1127 dev_ptr->disabled == 0 &&
1128 dev_ptr->user_disabled == 0 &&
1129 devTp == dev_ptr->dev_type &&
1130 low_count > dev_ptr->dev_caller_count) {
1131 low_count = dev_ptr->dev_caller_count;
1135 *device_nr_p = low_indx;
1140 send_to_crypto_device(struct work_element *we_p)
1142 struct caller *caller_p;
1143 struct device *device_p;
1146 if (!we_p->requestptr)
1147 return SEN_FATAL_ERROR;
1148 caller_p = (struct caller *)we_p->requestptr;
1149 dev_nr = we_p->devindex;
1150 if (select_device(&we_p->devtype, &dev_nr) == -1) {
1151 if (z90crypt.hdware_info->hdware_mask.st_count != 0)
1154 return SEN_NOT_AVAIL;
1156 we_p->devindex = dev_nr;
1157 device_p = z90crypt.device_p[dev_nr];
1159 return SEN_NOT_AVAIL;
1160 if (device_p->dev_type != we_p->devtype)
1162 if (device_p->dev_caller_count >= device_p->dev_q_depth)
1163 return SEN_QUEUE_FULL;
1164 PDEBUG("device number prior to send: %d\n", dev_nr);
1165 switch (send_to_AP(dev_nr, z90crypt.cdx,
1166 caller_p->caller_dev_dep_req_l,
1167 caller_p->caller_dev_dep_req_p)) {
1168 case DEV_SEN_EXCEPTION:
1169 PRINTKC("Exception during send to device %d\n", dev_nr);
1170 z90crypt.terminating = 1;
1171 return SEN_FATAL_ERROR;
1173 PRINTK("Device %d not available\n", dev_nr);
1174 remove_device(device_p);
1175 return SEN_NOT_AVAIL;
1177 return SEN_NOT_AVAIL;
1179 return SEN_FATAL_ERROR;
1180 case DEV_BAD_MESSAGE:
1181 return SEN_USER_ERROR;
1182 case DEV_QUEUE_FULL:
1183 return SEN_QUEUE_FULL;
1188 list_add_tail(&(caller_p->caller_liste), &(device_p->dev_caller_list));
1189 device_p->dev_caller_count++;
1194 * Send puts the user's work on one of two queues:
1195 * the pending queue if the send was successful
1196 * the request queue if the send failed because device full or busy
1199 z90crypt_send(struct work_element *we_p, const char *buf)
1203 PDEBUG("PID %d\n", PID());
1205 if (CHK_RDWRMASK(we_p->status[0]) != STAT_NOWORK) {
1206 PDEBUG("PID %d tried to send more work but has outstanding "
1210 we_p->devindex = -1; // Reset device number
1211 spin_lock_irq(&queuespinlock);
1212 rv = send_to_crypto_device(we_p);
1215 we_p->requestsent = jiffies;
1216 we_p->audit[0] |= FP_SENT;
1217 list_add_tail(&we_p->liste, &pending_list);
1219 we_p->audit[0] |= FP_PENDING;
1222 case SEN_QUEUE_FULL:
1224 we_p->devindex = -1; // any device will do
1225 we_p->requestsent = jiffies;
1226 list_add_tail(&we_p->liste, &request_list);
1228 we_p->audit[0] |= FP_REQUEST;
1234 PRINTK("*** No devices available.\n");
1235 rv = we_p->retcode = -ENODEV;
1236 we_p->status[0] |= STAT_FAILED;
1238 case REC_OPERAND_INV:
1239 case REC_OPERAND_SIZE:
1241 case REC_INVALID_PAD:
1242 rv = we_p->retcode = -EINVAL;
1243 we_p->status[0] |= STAT_FAILED;
1247 we_p->status[0] |= STAT_FAILED;
1250 if (rv != -ERESTARTSYS)
1251 SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
1252 spin_unlock_irq(&queuespinlock);
1254 tasklet_schedule(&reader_tasklet);
1259 * process_results copies the user's work from kernel space.
1262 z90crypt_process_results(struct work_element *we_p, char __user *buf)
1266 PDEBUG("we_p %p (PID %d)\n", we_p, PID());
1268 LONG2DEVPTR(we_p->devindex)->dev_total_req_cnt++;
1269 SET_RDWRMASK(we_p->status[0], STAT_READPEND);
1272 if (!we_p->buffer) {
1273 PRINTK("we_p %p PID %d in STAT_READPEND: buffer NULL.\n",
1279 if ((rv = copy_to_user(buf, we_p->buffer, we_p->buff_size))) {
1280 PDEBUG("copy_to_user failed: rv = %d\n", rv);
1287 if (we_p->resp_buff_size
1288 && copy_to_user(we_p->resp_addr, we_p->resp_buff,
1289 we_p->resp_buff_size))
1292 SET_RDWRMASK(we_p->status[0], STAT_NOWORK);
/* 8-byte all-zero PSMID (message id) value. */
static unsigned char NULL_psmid[8] = { 0 };
1300  * MIN_MOD_SIZE is a PCICC and PCIXCC limit.
1301  * MAX_PCICC_MOD_SIZE is a hard limit for the PCICC.
1302  * MAX_MOD_SIZE is a hard limit for the PCIXCC and PCICA.
// NOTE(review): values appear to be modulus lengths in bytes
// (64/128/256 -> 512/1024/2048-bit RSA) — TODO confirm against callers.
1304 #define MIN_MOD_SIZE 64
1305 #define MAX_PCICC_MOD_SIZE 128
1306 #define MAX_MOD_SIZE 256
1309  * Used in device configuration functions
// NOTE(review): MAX_RESET presumably bounds retry iterations during a
// device reset — verify where it is consumed.
1311 #define MAX_RESET 90
1314 * This is used only for PCICC support
/*
 * NOTE(review): lines elided.  Heuristic check for PKCS#1 block type 01
 * padding: 0x00 0x01, then a run of 0xFF bytes (at least 8), then a 0x00
 * separator.  Return value convention not visible here — presumably
 * boolean; confirm against the full source.
 */
1317 is_PKCS11_padded(unsigned char *buffer, int length)
1320 	if ((buffer[0] != 0x00) || (buffer[1] != 0x01))
1322 	for (i = 2; i < length; i++)
1323 		if (buffer[i] != 0xFF)
/* Padding run must be >= 8 bytes and must not consume the whole buffer. */
1325 	if ((i < 10) || (i == length))
1327 	if (buffer[i] != 0x00)
1333 * This is used only for PCICC support
/*
 * NOTE(review): lines elided.  Heuristic check for PKCS#1 block type 02
 * padding: 0x00 0x02, then non-zero random padding bytes (at least 8),
 * terminated by a 0x00 separator before end of buffer.
 */
1336 is_PKCS12_padded(unsigned char *buffer, int length)
1339 	if ((buffer[0] != 0x00) || (buffer[1] != 0x02))
1341 	for (i = 2; i < length; i++)
1342 		if (buffer[i] == 0x00)
1344 	if ((i < 10) || (i == length))
1346 	if (buffer[i] != 0x00)
1352 * builds struct caller and converts message from generic format to
1353 * device-dependent format
1354 * func is ICARSAMODEXPO or ICARSACRT
1355 * function is PCI_FUNC_KEY_ENCRYPT or PCI_FUNC_KEY_DECRYPT
/*
 * NOTE(review): lines elided.  Fills the struct caller embedded in the work
 * element and converts the generic request into the device-specific wire
 * format via convert_request().
 */
1358 build_caller(struct work_element *we_p, short function)
1361 	struct caller *caller_p = (struct caller *)we_p->requestptr;
/* Only the three known adapter types are supported. */
1363 	if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) &&
1364 	    (we_p->devtype != PCIXCC))
1365 		return SEN_NOT_AVAIL;
1367 	memcpy(caller_p->caller_id, we_p->caller_id,
1368 	       sizeof(caller_p->caller_id));
1369 	caller_p->caller_dev_dep_req_p = caller_p->caller_dev_dep_req;
1370 	caller_p->caller_dev_dep_req_l = MAX_RESPONSE_SIZE;
1371 	caller_p->caller_buf_p = we_p->buffer;
1372 	INIT_LIST_HEAD(&(caller_p->caller_liste));
1374 	rv = convert_request(we_p->buffer, we_p->funccode, function,
1375 			     z90crypt.cdx, we_p->devtype,
1376 			     &caller_p->caller_dev_dep_req_l,
1377 			     caller_p->caller_dev_dep_req_p);
1379 		if (rv == SEN_NOT_AVAIL)
1380 			PDEBUG("request can't be processed on hdwr avail\n");
1382 			PRINTK("Error from convert_request: %d\n", rv);
/* Stamp the caller id into the request at offset 4 (PSMID field). */
1385 	memcpy(&(caller_p->caller_dev_dep_req_p[4]), we_p->caller_id,8);
/*
 * NOTE(review): lines elided.  Detaches a caller from its device's caller
 * list (if it is actually linked) and clears its id so stale replies cannot
 * match it later.
 */
1390 unbuild_caller(struct device *device_p, struct caller *caller_p)
/* Guard: the list node may never have been initialized. */
1394 	if (caller_p->caller_liste.next && caller_p->caller_liste.prev)
1395 		if (!list_empty(&caller_p->caller_liste)) {
1396 			list_del(&caller_p->caller_liste);
1397 			device_p->dev_caller_count--;
1398 			INIT_LIST_HEAD(&caller_p->caller_liste);
1400 	memset(caller_p->caller_id, 0, sizeof(caller_p->caller_id));
/*
 * NOTE(review): lines elided throughout this function in the current view.
 * Validates the user's RSA request (mod-expo or CRT form), selects a device
 * type, copies the input data into the kernel-side staging buffer, picks the
 * PCI function (encrypt/decrypt) per device type and padding, and builds the
 * device-dependent request via build_caller().
 */
1404 get_crypto_request_buffer(struct work_element *we_p)
1406 	struct ica_rsa_modexpo *mex_p;
1407 	struct ica_rsa_modexpo_crt *crt_p;
1408 	unsigned char *temp_buffer;
/* Both views alias we_p->buffer; which one is valid depends on funccode. */
1412 	mex_p =	(struct ica_rsa_modexpo *) we_p->buffer;
1413 	crt_p = (struct ica_rsa_modexpo_crt *) we_p->buffer;
1415 	PDEBUG("device type input = %d\n", we_p->devtype);
1417 	if (z90crypt.terminating)
1418 		return REC_NO_RESPONSE;
1419 	if (memcmp(we_p->caller_id, NULL_psmid, 8) == 0) {
1420 		PRINTK("psmid zeroes\n");
1421 		return SEN_FATAL_ERROR;
1423 	if (!we_p->buffer) {
1424 		PRINTK("buffer pointer NULL\n");
1425 		return SEN_USER_ERROR;
1427 	if (!we_p->requestptr) {
1428 		PRINTK("caller pointer NULL\n");
1429 		return SEN_USER_ERROR;
1432 	if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) &&
1433 	    (we_p->devtype != PCIXCC) && (we_p->devtype != ANYDEV)) {
1434 		PRINTK("invalid device type\n");
1435 		return SEN_USER_ERROR;
/* inputdatalength is in bytes; MAX_MOD_SIZE (256) caps at 2048-bit keys. */
1438 	if ((mex_p->inputdatalength < 1) ||
1439 	    (mex_p->inputdatalength > MAX_MOD_SIZE)) {
1440 		PRINTK("inputdatalength[%d] is not valid\n",
1441 		       mex_p->inputdatalength);
1442 		return SEN_USER_ERROR;
1445 	if (mex_p->outputdatalength < mex_p->inputdatalength) {
1446 		PRINTK("outputdatalength[%d] < inputdatalength[%d]\n",
1447 		       mex_p->outputdatalength, mex_p->inputdatalength);
1448 		return SEN_USER_ERROR;
1451 	if (!mex_p->inputdata || !mex_p->outputdata) {
1452 		PRINTK("inputdata[%p] or outputdata[%p] is NULL\n",
1453 		       mex_p->outputdata, mex_p->inputdata);
1454 		return SEN_USER_ERROR;
1458 	 * As long as outputdatalength is big enough, we can set the
1459 	 * outputdatalength equal to the inputdatalength, since that is the
1460 	 * number of bytes we will copy in any case
1462 	mex_p->outputdatalength = mex_p->inputdatalength;
1465 	switch (we_p->funccode) {
1467 		if (!mex_p->b_key || !mex_p->n_modulus)
1468 			rv = SEN_USER_ERROR;
/* CRT form requires an even input length (split into p/q halves). */
1471 		if (!IS_EVEN(crt_p->inputdatalength)) {
1472 			PRINTK("inputdatalength[%d] is odd, CRT form\n",
1473 			       crt_p->inputdatalength);
1474 			rv = SEN_USER_ERROR;
1477 		if (!crt_p->bp_key ||
1481 		    !crt_p->u_mult_inv) {
1482 			PRINTK("CRT form, bad data: %p/%p/%p/%p/%p\n",
1483 			       crt_p->bp_key, crt_p->bq_key,
1484 			       crt_p->np_prime, crt_p->nq_prime,
1486 			rv = SEN_USER_ERROR;
1490 		PRINTK("bad func = %d\n", we_p->funccode);
1491 		rv = SEN_USER_ERROR;
1497 	if (select_device_type(&we_p->devtype) < 0)
1498 		return SEN_NOT_AVAIL;
/* Staging buffer lives directly after the work element + caller struct. */
1500 	temp_buffer = (unsigned char *)we_p + sizeof(struct work_element) +
1501 		      sizeof(struct caller);
1502 	if (copy_from_user(temp_buffer, mex_p->inputdata,
1503 			   mex_p->inputdatalength) != 0)
1504 		return SEN_RELEASED;
1506 	function = PCI_FUNC_KEY_ENCRYPT;
1507 	switch (we_p->devtype) {
1508 	/* PCICA does everything with a simple RSA mod-expo operation */
1510 		function = PCI_FUNC_KEY_ENCRYPT;
1513 	 * PCIXCC does all Mod-Expo form with a simple RSA mod-expo
1514 	 * operation, and all CRT forms with a PKCS-1.2 format decrypt.
1517 		/* Anything less than MIN_MOD_SIZE MUST go to a PCICA */
1518 		if (mex_p->inputdatalength < MIN_MOD_SIZE)
1519 			return SEN_NOT_AVAIL;
1520 		if (we_p->funccode == ICARSAMODEXPO)
1521 			function = PCI_FUNC_KEY_ENCRYPT;
1523 			function = PCI_FUNC_KEY_DECRYPT;
1526 	 * PCICC does everything as a PKCS-1.2 format request
1529 		/* Anything less than MIN_MOD_SIZE MUST go to a PCICA */
1530 		if (mex_p->inputdatalength < MIN_MOD_SIZE) {
1531 			return SEN_NOT_AVAIL;
1533 		/* Anything over MAX_PCICC_MOD_SIZE MUST go to a PCICA */
1534 		if (mex_p->inputdatalength > MAX_PCICC_MOD_SIZE) {
1535 			return SEN_NOT_AVAIL;
1537 		/* PCICC cannot handle input that is PKCS#1.1 padded */
1538 		if (is_PKCS11_padded(temp_buffer, mex_p->inputdatalength)) {
1539 			return SEN_NOT_AVAIL;
1541 		if (we_p->funccode == ICARSAMODEXPO) {
1542 			if (is_PKCS12_padded(temp_buffer,
1543 					     mex_p->inputdatalength))
1544 				function = PCI_FUNC_KEY_ENCRYPT;
1546 				function = PCI_FUNC_KEY_DECRYPT;
1548 		/* all CRT forms are decrypts */
1549 			function = PCI_FUNC_KEY_DECRYPT;
1552 	PDEBUG("function: %04x\n", function);
1553 	rv = build_caller(we_p, function);
1554 	PDEBUG("rv from build_caller = %d\n", rv);
/*
 * NOTE(review): lines elided.  Copies the user's ica_rsa_modexpo{,_crt}
 * header from user space into the work element, then asks
 * get_crypto_request_buffer() to validate and stage the request.  Error
 * codes from staging are translated (in elided lines) into errno values.
 */
1559 z90crypt_prepare(struct work_element *we_p, unsigned int funccode,
1560 		 const char __user *buffer)
1564 	we_p->devindex = -1;
/* Header size depends on whether this is plain mod-expo or CRT form. */
1565 	if (funccode == ICARSAMODEXPO)
1566 		we_p->buff_size = sizeof(struct ica_rsa_modexpo);
1568 		we_p->buff_size = sizeof(struct ica_rsa_modexpo_crt);
1570 	if (copy_from_user(we_p->buffer, buffer, we_p->buff_size))
1573 	we_p->audit[0] |= FP_COPYFROM;
1574 	SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
1575 	we_p->funccode = funccode;
1577 	we_p->audit[0] |= FP_BUFFREQ;
1578 	rv = get_crypto_request_buffer(we_p);
1581 		we_p->audit[0] |= FP_BUFFGOT;
1583 	case SEN_USER_ERROR:
1586 	case SEN_QUEUE_FULL:
1592 	case REC_NO_RESPONSE:
1599 		PRINTK("rv = %d\n", rv);
/* On failure, drop the element back to its idle read/write state. */
1603 	if (CHK_RDWRMASK(we_p->status[0]) == STAT_WRITTEN)
1604 		SET_RDWRMASK(we_p->status[0], STAT_DEFAULT);
/*
 * NOTE(review): lines elided.  Removes a work element from whichever queue
 * (request or pending) it is currently on, under queuespinlock, so it can
 * be freed safely after a failure or timeout.
 */
1609 purge_work_element(struct work_element *we_p)
1611 	struct list_head *lptr;
1613 	spin_lock_irq(&queuespinlock);
1614 	list_for_each(lptr, &request_list) {
1615 		if (lptr == &we_p->liste) {
/* Not on the request queue — try the pending queue next. */
1621 	list_for_each(lptr, &pending_list) {
1622 		if (lptr == &we_p->liste) {
1628 	spin_unlock_irq(&queuespinlock);
1632 * Build the request and send it.
/*
 * NOTE(review): lines elided.  Top-level RSA request path: allocate a work
 * element, prepare and send it, sleep until the reader tasklet wakes us,
 * then copy results back to user space.  Failure classification (elided
 * switch arms) converts device errors into errno values, with -ERESTARTSYS
 * used to retry on another card.
 */
1635 z90crypt_rsa(struct priv_data *private_data_p, pid_t pid,
1636 	     unsigned int cmd, unsigned long arg)
1638 	struct work_element *we_p;
1641 	if ((rv = allocate_work_element(&we_p, private_data_p, pid))) {
1642 		PDEBUG("PID %d: allocate_work_element returned ENOMEM\n", pid);
1645 	if ((rv = z90crypt_prepare(we_p, cmd, (const char __user *)arg)))
1646 		PDEBUG("PID %d: rv = %d from z90crypt_prepare\n", pid, rv);
1648 	if ((rv = z90crypt_send(we_p, (const char *)arg)))
1649 		PDEBUG("PID %d: rv %d from z90crypt_send.\n", pid, rv);
/* Block until the reader tasklet rings the alarm for this element. */
1651 	we_p->audit[0] |= FP_ASLEEP;
1652 	wait_event(we_p->waitq, atomic_read(&we_p->alarmrung));
1653 	we_p->audit[0] |= FP_AWAKE;
1657 	rv = z90crypt_process_results(we_p, (char __user *)arg);
1659 	if ((we_p->status[0] & STAT_FAILED)) {
1662 		 * EINVAL *after* receive is almost always padding
1663 		 * error issued by a PCICC or PCIXCC. We convert this
1664 		 * return value to -EGETBUFF which should trigger a
1665 		 * fallback to software.
1668 			if ((we_p->devtype == PCICC) ||
1669 			    (we_p->devtype == PCIXCC))
1673 			if (z90crypt.mask.st_count > 0)
1674 				rv = -ERESTARTSYS; // retry with another
1676 				rv = -ENODEV; // no cards left
1677 			/* fall through to clean up request queue */
1680 			switch (CHK_RDWRMASK(we_p->status[0])) {
1682 				purge_work_element(we_p);
1691 		we_p->status[0] ^= STAT_FAILED;
/* Work element was allocated as a whole page; release it the same way. */
1695 	free_page((long)we_p);
1700 * This function is a little long, but it's really just one large switch
/*
 * NOTE(review): lines elided.  Main ioctl dispatcher: RSA operations are
 * retried across cards on -ERESTARTSYS; the Z90STAT_* commands copy a
 * single status value or mask to user space; ICAZ90STATUS is the
 * deprecated all-in-one status call; quiesce requires euid 0.
 */
1704 z90crypt_ioctl(struct inode *inode, struct file *filp,
1705 	       unsigned int cmd, unsigned long arg)
1707 	struct priv_data *private_data_p = filp->private_data;
1708 	unsigned char *status;
1709 	unsigned char *qdepth;
1710 	unsigned int *reqcnt;
1711 	struct ica_z90_status *pstat;
1712 	int ret, i, loopLim, tempstat;
1713 	static int deprecated_msg_count = 0;
1715 	PDEBUG("filp %p (PID %d), cmd 0x%08X\n", filp, PID(), cmd);
1716 	PDEBUG("cmd 0x%08X: dir %s, size 0x%04X, type 0x%02X, nr 0x%02X\n",
1718 	       !_IOC_DIR(cmd) ? "NO"
1719 	       : ((_IOC_DIR(cmd) == (_IOC_READ|_IOC_WRITE)) ? "RW"
1720 	       : ((_IOC_DIR(cmd) == _IOC_READ) ? "RD"
1722 	       _IOC_SIZE(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd));
1724 	if (_IOC_TYPE(cmd) != Z90_IOCTL_MAGIC) {
1725 		PRINTK("cmd 0x%08X contains bad magic\n", cmd);
1733 		if (quiesce_z90crypt) {
1737 		ret = -ENODEV; // Default if no devices
/* Retry an RSA op once per currently-usable (enabled) card. */
1738 		loopLim = z90crypt.hdware_info->hdware_mask.st_count -
1739 			(z90crypt.hdware_info->hdware_mask.disabled_count +
1740 			 z90crypt.hdware_info->hdware_mask.user_disabled_count);
1741 		for (i = 0; i < loopLim; i++) {
1742 			ret = z90crypt_rsa(private_data_p, PID(), cmd, arg);
1743 			if (ret != -ERESTARTSYS)
1746 		if (ret == -ERESTARTSYS)
1750 	case Z90STAT_TOTALCOUNT:
1751 		tempstat = get_status_totalcount();
1752 		if (copy_to_user((int __user *)arg, &tempstat,sizeof(int)) != 0)
1756 	case Z90STAT_PCICACOUNT:
1757 		tempstat = get_status_PCICAcount();
1758 		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1762 	case Z90STAT_PCICCCOUNT:
1763 		tempstat = get_status_PCICCcount();
1764 		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1768 	case Z90STAT_PCIXCCCOUNT:
1769 		tempstat = get_status_PCIXCCcount();
1770 		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1774 	case Z90STAT_REQUESTQ_COUNT:
1775 		tempstat = get_status_requestq_count();
1776 		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1780 	case Z90STAT_PENDINGQ_COUNT:
1781 		tempstat = get_status_pendingq_count();
1782 		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1786 	case Z90STAT_TOTALOPEN_COUNT:
1787 		tempstat = get_status_totalopen_count();
1788 		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1792 	case Z90STAT_DOMAIN_INDEX:
1793 		tempstat = get_status_domain_index();
1794 		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1798 	case Z90STAT_STATUS_MASK:
/* Per-device status mask: one byte per possible adapter. */
1799 		status = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
1801 			PRINTK("kmalloc for status failed!\n");
1805 		get_status_status_mask(status);
1806 		if (copy_to_user((char __user *) arg, status, Z90CRYPT_NUM_APS)
1812 	case Z90STAT_QDEPTH_MASK:
1813 		qdepth = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
1815 			PRINTK("kmalloc for qdepth failed!\n");
1819 		get_status_qdepth_mask(qdepth);
1820 		if (copy_to_user((char __user *) arg, qdepth, Z90CRYPT_NUM_APS) != 0)
1825 	case Z90STAT_PERDEV_REQCNT:
1826 		reqcnt = kmalloc(sizeof(int) * Z90CRYPT_NUM_APS, GFP_KERNEL);
1828 			PRINTK("kmalloc for reqcnt failed!\n");
1832 		get_status_perdevice_reqcnt(reqcnt);
1833 		if (copy_to_user((char __user *) arg, reqcnt,
1834 				 Z90CRYPT_NUM_APS * sizeof(int)) != 0)
1839 	/* THIS IS DEPRECATED.  USE THE NEW STATUS CALLS */
/* Throttle the deprecation warning to the first 100 occurrences. */
1841 		if (deprecated_msg_count < 100) {
1842 			PRINTK("deprecated call to ioctl (ICAZ90STATUS)!\n");
1843 			deprecated_msg_count++;
1844 			if (deprecated_msg_count == 100)
1845 				PRINTK("No longer issuing messages related to "
1846 				       "deprecated call to ICAZ90STATUS.\n");
1849 		pstat = kmalloc(sizeof(struct ica_z90_status), GFP_KERNEL);
1851 			PRINTK("kmalloc for pstat failed!\n");
1856 		pstat->totalcount	 = get_status_totalcount();
1857 		pstat->leedslitecount	 = get_status_PCICAcount();
1858 		pstat->leeds2count	 = get_status_PCICCcount();
1859 		pstat->requestqWaitCount = get_status_requestq_count();
1860 		pstat->pendingqWaitCount = get_status_pendingq_count();
1861 		pstat->totalOpenCount	 = get_status_totalopen_count();
1862 		pstat->cryptoDomain	 = get_status_domain_index();
1863 		get_status_status_mask(pstat->status);
1864 		get_status_qdepth_mask(pstat->qdepth);
1866 		if (copy_to_user((struct ica_z90_status __user *) arg, pstat,
1867 				 sizeof(struct ica_z90_status)) != 0)
/* Quiesce is root-only (pre-capabilities-era check on euid). */
1873 		if (current->euid != 0) {
1874 			PRINTK("QUIESCE fails: euid %d\n",
1878 		PRINTK("QUIESCE device from PID %d\n", PID());
1879 		quiesce_z90crypt = 1;
1884 		/* user passed an invalid IOCTL number */
1885 		PDEBUG("cmd 0x%08X contains invalid ioctl code\n", cmd);
/*
 * NOTE(review): lines elided.  Formats `len` bytes as hex nibble pairs into
 * outaddr, appending a trailing space; returns chars written (accumulated
 * in hl).  The "%01x" of a full byte prints up to two hex digits.
 */
1894 sprintcl(unsigned char *outaddr, unsigned char *addr, unsigned int len)
1899 	for (i = 0; i < len; i++)
1900 		hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]);
1901 	hl += sprintf(outaddr+hl, " ");
/*
 * NOTE(review): lines elided.  Formats one "row" of a hex dump: groups of
 * 16 bytes via sprintcl(), plus a partial trailing group (cx bytes, set in
 * an elided line) and a newline.  Returns total chars written.
 */
1907 sprintrw(unsigned char *outaddr, unsigned char *addr, unsigned int len)
1911 	hl = sprintf(outaddr, "    ");
1913 	for (c = 0; c < (len / 16); c++) {
1914 		hl += sprintcl(outaddr+hl, addr+inl, 16);
1920 		hl += sprintcl(outaddr+hl, addr+inl, cx);
1924 	hl += sprintf(outaddr+hl, "\n");
/*
 * NOTE(review): lines elided.  Formats a titled hex dump: title line, then
 * 64-byte rows via sprintrw(), plus a partial final row (rx bytes, computed
 * in an elided line).  Returns total chars written.
 */
1930 sprinthx(unsigned char *title, unsigned char *outaddr,
1931 	 unsigned char *addr, unsigned int len)
1935 	hl = sprintf(outaddr, "\n%s\n", title);
1937 	for (r = 0; r < (len / 64); r++) {
1938 		hl += sprintrw(outaddr+hl, addr+inl, 64);
1943 		hl += sprintrw(outaddr+hl, addr+inl, rx);
1947 	hl += sprintf(outaddr+hl, "\n");
/*
 * NOTE(review): lines elided.  Like sprinthx() but for an array of 32-bit
 * words: prints a title, then each word as 8 hex digits, with line breaks
 * inserted by elided logic.  Returns total chars written.
 */
1953 sprinthx4(unsigned char *title, unsigned char *outaddr,
1954 	  unsigned int *array, unsigned int len)
1958 	hl = sprintf(outaddr, "\n%s\n", title);
1960 	for (r = 0; r < len; r++) {
1962 			hl += sprintf(outaddr+hl, "    ");
1963 		hl += sprintf(outaddr+hl, "%08X ", array[r]);
1965 			hl += sprintf(outaddr+hl, "\n");
1968 	hl += sprintf(outaddr+hl, "\n");
/*
 * NOTE(review): lines elided.  /proc read handler: renders driver version,
 * domain, per-type device counts, queue depths, and three masks (online
 * devices, queue depths, per-device request counts) into resp_buff.  The
 * page is split: first half output, second half (offset 2000) scratch.
 */
1974 z90crypt_status(char *resp_buff, char **start, off_t offset,
1975 		int count, int *eof, void *data)
1977 	unsigned char *workarea;
1980 	/* resp_buff is a page. Use the right half for a work area */
1981 	workarea = resp_buff+2000;
1983 	len += sprintf(resp_buff+len, "\nz90crypt version: %d.%d.%d\n",
1984 		z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT);
1985 	len += sprintf(resp_buff+len, "Cryptographic domain: %d\n",
1986 		get_status_domain_index());
1987 	len += sprintf(resp_buff+len, "Total device count: %d\n",
1988 		get_status_totalcount());
1989 	len += sprintf(resp_buff+len, "PCICA count: %d\n",
1990 		get_status_PCICAcount());
1991 	len += sprintf(resp_buff+len, "PCICC count: %d\n",
1992 		get_status_PCICCcount());
1993 	len += sprintf(resp_buff+len, "PCIXCC count: %d\n",
1994 		get_status_PCIXCCcount());
1995 	len += sprintf(resp_buff+len, "requestq count: %d\n",
1996 		get_status_requestq_count());
1997 	len += sprintf(resp_buff+len, "pendingq count: %d\n",
1998 		get_status_pendingq_count());
1999 	len += sprintf(resp_buff+len, "Total open handles: %d\n\n",
2000 		get_status_totalopen_count());
2002 		"Online devices: 1 means PCICA, 2 means PCICC, 3 means PCIXCC",
2004 		get_status_status_mask(workarea),
2006 	len += sprinthx("Waiting work element counts",
2008 			get_status_qdepth_mask(workarea),
2011 			 "Per-device successfully completed request counts",
2013 			 get_status_perdevice_reqcnt((unsigned int *)workarea),
/* Scrub the scratch area so stale counts don't leak into the next read. */
2016 	memset(workarea, 0, Z90CRYPT_NUM_APS * sizeof(unsigned int));
/*
 * NOTE(review): lines elided.  User-requested disable of a card: marks the
 * device user_disabled and bumps both the global and the per-type
 * user-disabled counters (skipping the type counter if type is unknown).
 */
2021 disable_card(int card_index)
2023 	struct device *devp;
2025 	devp = LONG2DEVPTR(card_index);
/* No-op if the card doesn't exist or is already user-disabled. */
2026 	if (!devp || devp->user_disabled)
2028 	devp->user_disabled = 1;
2029 	z90crypt.hdware_info->hdware_mask.user_disabled_count++;
2030 	if (devp->dev_type == -1)
2032 	z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count++;
/*
 * NOTE(review): lines elided.  Mirror of disable_card(): clears the
 * user_disabled flag and decrements the matching counters.
 */
2036 enable_card(int card_index)
2038 	struct device *devp;
2040 	devp = LONG2DEVPTR(card_index);
/* No-op if the card doesn't exist or isn't currently user-disabled. */
2041 	if (!devp || !devp->user_disabled)
2043 	devp->user_disabled = 0;
2044 	z90crypt.hdware_info->hdware_mask.user_disabled_count--;
2045 	if (devp->dev_type == -1)
2047 	z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count--;
/*
 * NOTE(review): lines elided.  Scans buffer bf (length len) for character
 * c; reports the offset via *offs and sets *p_eof when a NUL or newline
 * terminates the scan first.  Returns whether c was found (elided return).
 */
2051 scan_char(unsigned char *bf, unsigned int len,
2052 	  unsigned int *offs, unsigned int *p_eof, unsigned char c)
2054 	unsigned int i, found;
2057 	for (i = 0; i < len; i++) {
2062 		if (bf[i] == '\0') {
2066 		if (bf[i] == '\n') {
/*
 * NOTE(review): lines elided.  Scans bf for string s by repeatedly locating
 * the first character via scan_char() and then strncmp-ing the remainder.
 * On success stores the offset just past the match in *offs; *p_eof is set
 * (in elided lines) when end of buffer/line is hit first.
 */
2075 scan_string(unsigned char *bf, unsigned int len,
2076 	    unsigned int *offs, unsigned int *p_eof, unsigned char *s)
2078 	unsigned int temp_len, temp_offs, found, eof;
2080 	temp_len = temp_offs = found = eof = 0;
2081 	while (!eof && !found) {
2082 		found = scan_char(bf+temp_len, len-temp_len,
2083 				  &temp_offs, &eof, *s);
2085 		temp_len += temp_offs;
/* Only compare if the whole candidate string fits in what's left. */
2092 		if (len >= temp_offs+strlen(s)) {
2093 			found = !strncmp(bf+temp_len-1, s, strlen(s));
2095 				*offs = temp_len+strlen(s)-1;
/*
 * NOTE(review): lines elided.  /proc write handler: copies up to
 * LBUFSIZE-1 bytes of user input into a kernel buffer, NUL-terminates it,
 * then (in elided lines) scans for command keywords and per-card digits
 * that enable/disable individual cards.
 */
2109 z90crypt_status_write(struct file *file, const char __user *buffer,
2110 		      unsigned long count, void *data)
2112 	int i, j, len, offs, found, eof;
2113 	unsigned char *lbuf;
2114 	unsigned int local_count;
2116 #define LBUFSIZE 600
2117 	lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
2119 		PRINTK("kmalloc failed!\n");
/* Clamp the copy so the terminator below always fits. */
2126 	local_count = UMIN((unsigned int)count, LBUFSIZE-1);
2128 	if (copy_from_user(lbuf, buffer, local_count) != 0) {
2133 	lbuf[local_count-1] = '\0';
2139 	found = scan_string(lbuf+len, local_count-len, &offs, &eof,
2152 	found = scan_char(lbuf+len, local_count-len, &offs, &eof, '\n');
2154 	if (!found || eof) {
/* Walk up to 80 mask characters, one per card slot. */
2161 	for (i = 0; i < 80; i++) {
2162 		switch (*(lbuf+len+i)) {
2196 * Functions that run under a timer, with no process id
2198 * The task functions:
2199 * z90crypt_reader_task
2201 * helper_handle_work_element
2203 * z90crypt_config_task
2204 * z90crypt_cleanup_task
2207 * z90crypt_schedule_reader_timer
2208 * z90crypt_schedule_reader_task
2209 * z90crypt_schedule_config_task
2210 * z90crypt_schedule_cleanup_task
/*
 * NOTE(review): lines elided.  Dequeues one reply from device `index`:
 * calls receive_from_AP(), maps hardware status to REC_* codes, matches the
 * returned PSMID against the device's caller list, converts the response to
 * the generic format, and reports the user-space destination via *dest_p_p.
 * A failed receive can remove the device; cleanup runs via unbuild_caller().
 */
2213 receive_from_crypto_device(int index, unsigned char *psmid, int *buff_len_p,
2214 			   unsigned char *buff, unsigned char __user **dest_p_p)
2217 	struct device *dev_ptr;
2218 	struct caller *caller_p;
2219 	struct ica_rsa_modexpo *icaMsg_p;
2220 	struct list_head *ptr, *tptr;
/* Start with the null PSMID so callers can detect "nothing received". */
2222 	memcpy(psmid, NULL_psmid, sizeof(NULL_psmid));
2224 	if (z90crypt.terminating)
2225 		return REC_FATAL_ERROR;
2228 	dev_ptr = z90crypt.device_p[index];
2231 	PDEBUG("Dequeue called for device %d\n", index);
2232 	if (!dev_ptr || dev_ptr->disabled) {
2233 		rv = REC_NO_RESPONSE;
/* Sanity check: the device must know its own index. */
2236 	if (dev_ptr->dev_self_x != index) {
2237 		PRINTKC("Corrupt dev ptr\n");
2238 		z90crypt.terminating = 1;
2239 		rv = REC_FATAL_ERROR;
2242 	if (!dev_ptr->dev_resp_l || !dev_ptr->dev_resp_p) {
2243 		dv = DEV_REC_EXCEPTION;
2244 		PRINTK("dev_resp_l = %d, dev_resp_p = %p\n",
2245 		       dev_ptr->dev_resp_l, dev_ptr->dev_resp_p);
2247 		dv = receive_from_AP(index, z90crypt.cdx,
2248 				     dev_ptr->dev_resp_l,
2249 				     dev_ptr->dev_resp_p, psmid);
2252 	case DEV_REC_EXCEPTION:
2253 		rv = REC_FATAL_ERROR;
2254 		z90crypt.terminating = 1;
2255 		PRINTKC("Exception in receive from device %d\n",
2267 	case DEV_BAD_MESSAGE:
2269 	case REC_HARDWAR_ERR:
2271 		rv = REC_NO_RESPONSE;
2276 	if (dev_ptr->dev_caller_count <= 0) {
/* Find the caller whose PSMID matches the reply and unlink it. */
2281 	list_for_each_safe(ptr, tptr, &dev_ptr->dev_caller_list) {
2282 		caller_p = list_entry(ptr, struct caller, caller_liste);
2283 		if (!memcmp(caller_p->caller_id, psmid,
2284 			    sizeof(caller_p->caller_id))) {
2285 			if (!list_empty(&caller_p->caller_liste)) {
2287 				dev_ptr->dev_caller_count--;
2288 				INIT_LIST_HEAD(&caller_p->caller_liste);
2299 	PDEBUG("caller_p after successful receive: %p\n", caller_p);
2300 	rv = convert_response(dev_ptr->dev_resp_p,
2301 			      caller_p->caller_buf_p, buff_len_p, buff);
2303 	case REC_OPERAND_INV:
2304 		PDEBUG("dev %d: user error %d\n", index, rv);
2306 	case WRONG_DEVICE_TYPE:
2307 	case REC_HARDWAR_ERR:
2308 	case REC_BAD_MESSAGE:
2309 		PRINTK("dev %d: hardware error %d\n",
2311 		rv = REC_NO_RESPONSE;
2314 		PDEBUG("dev %d: REC_RELEASED = %d\n",
2318 		PDEBUG("dev %d: rv = %d\n", index, rv);
2325 	PDEBUG("Successful receive from device %d\n", index);
2326 	icaMsg_p = (struct ica_rsa_modexpo *)caller_p->caller_buf_p;
2327 	*dest_p_p = icaMsg_p->outputdata;
2328 	if (*buff_len_p == 0)
2329 		PRINTK("Zero *buff_len_p\n");
2331 	case REC_NO_RESPONSE:
/* Device stopped answering — take it out of the configuration. */
2332 		remove_device(dev_ptr);
2337 		unbuild_caller(dev_ptr, caller_p);
/*
 * NOTE(review): lines elided.  Called by the reader when device `index`
 * frees a slot: pops the oldest element off request_list and tries to send
 * it to that device.  On send failure the waiter is failed and woken with
 * an appropriate errno (-EINVAL for bad operands, -ENODEV/-ERESTARTSYS
 * depending on how many devices remain).
 */
2343 helper_send_work(int index)
2345 	struct work_element *rq_p;
2348 	if (list_empty(&request_list))
2351 	rq_p = list_entry(request_list.next, struct work_element, liste);
2352 	list_del(&rq_p->liste);
2353 	rq_p->audit[1] |= FP_REMREQUEST;
/* Only send if the queued element's type matches this device. */
2354 	if (rq_p->devtype == SHRT2DEVPTR(index)->dev_type) {
2355 		rq_p->devindex = SHRT2LONG(index);
2356 		rv = send_to_crypto_device(rq_p);
2358 			rq_p->requestsent = jiffies;
2359 			rq_p->audit[0] |= FP_SENT;
2360 			list_add_tail(&rq_p->liste, &pending_list);
2362 			rq_p->audit[0] |= FP_PENDING;
2365 			case REC_OPERAND_INV:
2366 			case REC_OPERAND_SIZE:
2368 			case REC_INVALID_PAD:
2369 				rq_p->retcode = -EINVAL;
2373 			case REC_NO_RESPONSE:
/* More than one card left: let the caller retry elsewhere. */
2375 				if (z90crypt.mask.st_count > 1)
2379 					rq_p->retcode = -ENODEV;
2382 			rq_p->status[0] |= STAT_FAILED;
2383 			rq_p->audit[1] |= FP_AWAKENING;
2384 			atomic_set(&rq_p->alarmrung, 1);
2385 			wake_up(&rq_p->waitq);
/* Type mismatch path: fail or retry depending on remaining devices. */
2388 		if (z90crypt.mask.st_count > 1)
2389 			rq_p->retcode = -ERESTARTSYS;
2391 			rq_p->retcode = -ENODEV;
2392 		rq_p->status[0] |= STAT_FAILED;
2393 		rq_p->audit[1] |= FP_AWAKENING;
2394 		atomic_set(&rq_p->alarmrung, 1);
2395 		wake_up(&rq_p->waitq);
/*
 * NOTE(review): lines elided.  After a successful dequeue, finds the
 * pending work element whose caller_id matches the received PSMID, copies
 * the response buffer and user destination into it, applies the rc-based
 * errno, and wakes its waiter.
 */
2400 helper_handle_work_element(int index, unsigned char psmid[8], int rc,
2401 			   int buff_len, unsigned char *buff,
2402 			   unsigned char __user *resp_addr)
2404 	struct work_element *pq_p;
2405 	struct list_head *lptr, *tptr;
2408 	list_for_each_safe(lptr, tptr, &pending_list) {
2409 		pq_p = list_entry(lptr, struct work_element, liste);
2410 		if (!memcmp(pq_p->caller_id, psmid, sizeof(pq_p->caller_id))) {
2413 			pq_p->audit[1] |= FP_NOTPENDING;
/* Reply with no matching pending element — log and bail. */
2420 		PRINTK("device %d has work but no caller exists on pending Q\n",
2427 	pq_p->resp_buff_size = buff_len;
2428 	pq_p->audit[1] |= FP_RESPSIZESET;
2430 		pq_p->resp_addr = resp_addr;
2431 		pq_p->audit[1] |= FP_RESPADDRCOPIED;
2432 		memcpy(pq_p->resp_buff, buff, buff_len);
2433 		pq_p->audit[1] |= FP_RESPBUFFCOPIED;
2436 	case REC_OPERAND_INV:
2437 	case REC_OPERAND_SIZE:
2439 	case REC_INVALID_PAD:
2440 		PDEBUG("-EINVAL after application error %d\n", rc);
2441 		pq_p->retcode = -EINVAL;
2442 		pq_p->status[0] |= STAT_FAILED;
2444 	case REC_NO_RESPONSE:
2446 		if (z90crypt.mask.st_count > 1)
2447 			pq_p->retcode = -ERESTARTSYS;
2449 			pq_p->retcode = -ENODEV;
2450 		pq_p->status[0] |= STAT_FAILED;
/* Don't wake a waiter whose element was already released. */
2453 	if ((pq_p->status[0] != STAT_FAILED) || (pq_p->retcode != -ERELEASED)) {
2454 		pq_p->audit[1] |= FP_AWAKENING;
2455 		atomic_set(&pq_p->alarmrung, 1);
2456 		wake_up(&pq_p->waitq);
2461 * return TRUE if the work element should be removed from the queue
/*
 * NOTE(review): lines elided.  Classifies a dequeue return code: decides
 * whether the element leaves the queue and whether more work may remain
 * (*workavail_p); unknown codes are logged and folded into REC_NO_RESPONSE.
 */
2464 helper_receive_rc(int index, int *rc_p, int *workavail_p)
2468 	case REC_OPERAND_INV:
2469 	case REC_OPERAND_SIZE:
2471 	case REC_INVALID_PAD:
2478 	case REC_FATAL_ERROR:
2481 	case REC_NO_RESPONSE:
2486 		PRINTK("rc %d, device %d\n", *rc_p, SHRT2LONG(index));
2487 		*rc_p = REC_NO_RESPONSE;
/*
 * Re-arm the reader timer READERTIME ms from now; skip if already pending.
 * mod_timer() returning nonzero here means the timer was still pending —
 * logged as unexpected.
 */
2495 z90crypt_schedule_reader_timer(void)
2497 	if (timer_pending(&reader_timer))
2499 	if (mod_timer(&reader_timer, jiffies+(READERTIME*HZ/1000)) != 0)
2500 		PRINTK("Timer pending while modifying reader timer\n");
/*
 * NOTE(review): lines elided.  Reader tasklet body: round-robins over all
 * online devices, dequeuing one reply per device per pass, handing each
 * reply to helper_handle_work_element() and refilling freed slots via
 * helper_send_work(); finally re-arms the reader timer.
 */
2504 z90crypt_reader_task(unsigned long ptr)
2506 	int workavail, remaining, index, rc, buff_len;
2507 	unsigned char	psmid[8];
2508 	unsigned char __user *resp_addr;
/* Static scratch buffer — safe only because tasklets don't run concurrently
   with themselves; TODO confirm against the elided locking. */
2509 	static unsigned char buff[1024];
2511 	PDEBUG("jiffies %ld\n", jiffies);
2514 	 * we use workavail = 2 to ensure 2 passes with nothing dequeued before
2515 	 * exiting the loop. If remaining == 0 after the loop, there is no work
2516 	 * remaining on the queues.
2525 		spin_lock_irq(&queuespinlock);
2526 		memset(buff, 0x00, sizeof(buff));
2528 		/* Dequeue once from each device in round robin. */
2529 		for (index = 0; index < z90crypt.mask.st_count; index++) {
2530 			PDEBUG("About to receive.\n");
2531 			rc = receive_from_crypto_device(SHRT2LONG(index),
2536 			PDEBUG("Dequeued: rc = %d.\n", rc);
2538 			if (helper_receive_rc(index, &rc, &workavail)) {
2539 				if (rc != REC_NO_RESPONSE) {
2540 					helper_send_work(index);
2544 				helper_handle_work_element(index, psmid, rc,
2549 			if (rc == REC_FATAL_ERROR)
2551 			else if (rc != REC_NO_RESPONSE)
2553 					SHRT2DEVPTR(index)->dev_caller_count;
2555 		spin_unlock_irq(&queuespinlock);
2559 		spin_lock_irq(&queuespinlock);
2560 		z90crypt_schedule_reader_timer();
2561 		spin_unlock_irq(&queuespinlock);
/* Re-arm the config timer `expiration` seconds out; skip if pending. */
2566 z90crypt_schedule_config_task(unsigned int expiration)
2568 	if (timer_pending(&config_timer))
2570 	if (mod_timer(&config_timer, jiffies+(expiration*HZ)) != 0)
2571 		PRINTK("Timer pending while modifying config timer\n");
/*
 * Periodic configuration task: rescans hardware via refresh_z90crypt() and
 * reschedules itself unless the refresh failed fatally.
 */
2575 z90crypt_config_task(unsigned long ptr)
2579 	PDEBUG("jiffies %ld\n", jiffies);
2581 	if ((rc = refresh_z90crypt(&z90crypt.cdx)))
2582 		PRINTK("Error %d detected in refresh_z90crypt.\n", rc);
2583 	/* If return was fatal, don't bother reconfiguring */
2584 	if ((rc != TSQ_FATAL_ERROR) && (rc != RSQ_FATAL_ERROR))
2585 		z90crypt_schedule_config_task(CONFIGTIME);
/* Re-arm the cleanup timer CLEANUPTIME seconds out; skip if pending. */
2589 z90crypt_schedule_cleanup_task(void)
2591 	if (timer_pending(&cleanup_timer))
2593 	if (mod_timer(&cleanup_timer, jiffies+(CLEANUPTIME*HZ)) != 0)
2594 		PRINTK("Timer pending while modifying cleanup timer\n");
/*
 * NOTE(review): lines elided.  Called when no devices remain: fails every
 * element on both the pending and request queues with -ENODEV, unlinks
 * their callers, and wakes all waiters.
 */
2598 helper_drain_queues(void)
2600 	struct work_element *pq_p;
2601 	struct list_head *lptr, *tptr;
2603 	list_for_each_safe(lptr, tptr, &pending_list) {
2604 		pq_p = list_entry(lptr, struct work_element, liste);
2605 		pq_p->retcode = -ENODEV;
2606 		pq_p->status[0] |= STAT_FAILED;
/* Detach from the (now gone) device's caller list before waking. */
2607 		unbuild_caller(LONG2DEVPTR(pq_p->devindex),
2608 			       (struct caller *)pq_p->requestptr);
2611 		pq_p->audit[1] |= FP_NOTPENDING;
2612 		pq_p->audit[1] |= FP_AWAKENING;
2613 		atomic_set(&pq_p->alarmrung, 1);
2614 		wake_up(&pq_p->waitq);
2617 	list_for_each_safe(lptr, tptr, &request_list) {
2618 		pq_p = list_entry(lptr, struct work_element, liste);
2619 		pq_p->retcode = -ENODEV;
2620 		pq_p->status[0] |= STAT_FAILED;
2623 		pq_p->audit[1] |= FP_REMREQUEST;
2624 		pq_p->audit[1] |= FP_AWAKENING;
2625 		atomic_set(&pq_p->alarmrung, 1);
2626 		wake_up(&pq_p->waitq);
/*
 * NOTE(review): lines elided.  Times out requests older than CLEANUPTIME
 * seconds: fails expired pending elements with -ETIMEOUT and wakes their
 * waiters; if nothing is pending, the request queue is also drained the
 * same way (it would otherwise never be serviced).
 */
2631 helper_timeout_requests(void)
2633 	struct work_element *pq_p;
2634 	struct list_head *lptr, *tptr;
2637 	timelimit = jiffies - (CLEANUPTIME * HZ);
2638 	/* The list is in strict chronological order */
2639 	list_for_each_safe(lptr, tptr, &pending_list) {
2640 		pq_p = list_entry(lptr, struct work_element, liste);
/* First non-expired element ends the scan (list is oldest-first). */
2641 		if (pq_p->requestsent >= timelimit)
2643 		pq_p->retcode = -ETIMEOUT;
2644 		pq_p->status[0] |= STAT_FAILED;
2645 		/* get this off any caller queue it may be on */
2646 		unbuild_caller(LONG2DEVPTR(pq_p->devindex),
2647 			       (struct caller *) pq_p->requestptr);
2650 		pq_p->audit[1] |= FP_TIMEDOUT;
2651 		pq_p->audit[1] |= FP_NOTPENDING;
2652 		pq_p->audit[1] |= FP_AWAKENING;
2653 		atomic_set(&pq_p->alarmrung, 1);
2654 		wake_up(&pq_p->waitq);
2658 	 * If pending count is zero, items left on the request queue may
2659 	 * never be processed.
2661 	if (pendingq_count <= 0) {
2662 		list_for_each_safe(lptr, tptr, &request_list) {
2663 			pq_p = list_entry(lptr, struct work_element, liste);
2664 			if (pq_p->requestsent >= timelimit)
2666 			pq_p->retcode = -ETIMEOUT;
2667 			pq_p->status[0] |= STAT_FAILED;
2670 			pq_p->audit[1] |= FP_TIMEDOUT;
2671 			pq_p->audit[1] |= FP_REMREQUEST;
2672 			pq_p->audit[1] |= FP_AWAKENING;
2673 			atomic_set(&pq_p->alarmrung, 1);
2674 			wake_up(&pq_p->waitq);
/*
 * Periodic cleanup task: with no devices left, drain both queues with
 * -ENODEV; otherwise expire stale requests.  Runs under queuespinlock and
 * always reschedules itself.
 */
2680 z90crypt_cleanup_task(unsigned long ptr)
2682 	PDEBUG("jiffies %ld\n", jiffies);
2683 	spin_lock_irq(&queuespinlock);
2684 	if (z90crypt.mask.st_count <= 0) // no devices!
2685 		helper_drain_queues();
2687 		helper_timeout_requests();
2688 	spin_unlock_irq(&queuespinlock);
2689 	z90crypt_schedule_cleanup_task();
/* Timer callback: defer reply collection to the reader tasklet. */
2693 z90crypt_schedule_reader_task(unsigned long ptr)
2695 	tasklet_schedule(&reader_tasklet);
2699 * Lowlevel Functions:
2701 * create_z90crypt: creates and initializes basic data structures
2702 * refresh_z90crypt: re-initializes basic data structures
2703 * find_crypto_devices: returns a count and mask of hardware status
2704 * create_crypto_device: builds the descriptor for a device
2705 * destroy_crypto_device: unallocates the descriptor for a device
2706 * destroy_z90crypt: drains all work, unallocates structs
2710 * build the z90crypt root structure using the given domain index
/*
 * NOTE(review): lines elided.  Zeroes the global z90crypt root, records the
 * domain index and device limit, and allocates (GFP_ATOMIC) the hardware
 * info block.  Return value conventions are in elided lines.
 */
2713 create_z90crypt(int *cdx_p)
2715 	struct hdware_block *hdware_blk_p;
2717 	memset(&z90crypt, 0x00, sizeof(struct z90crypt));
2718 	z90crypt.domain_established = 0;
2719 	z90crypt.len = sizeof(struct z90crypt);
2720 	z90crypt.max_count = Z90CRYPT_NUM_DEVS;
2721 	z90crypt.cdx = *cdx_p;
2723 	hdware_blk_p = (struct hdware_block *)
2724 		kmalloc(sizeof(struct hdware_block), GFP_ATOMIC);
2725 	if (!hdware_blk_p) {
2726 		PDEBUG("kmalloc for hardware block failed\n");
2729 	memset(hdware_blk_p, 0x00, sizeof(struct hdware_block));
2730 	z90crypt.hdware_info = hdware_blk_p;
/*
 * NOTE(review): lines elided.  Probes every device across all 16 candidate
 * domain indices via query_online(); records online domains in cdx_array,
 * sets *correct_cdx_found when the requested domain answers, and bails out
 * on a TSQ exception (setting z90crypt.terminating).  Returns k, the count
 * of online domains found (accumulation is in elided lines).
 */
2736 helper_scan_devices(int cdx_array[16], int *cdx_p, int *correct_cdx_found)
2738 	enum hdstat hd_stat;
2739 	int q_depth, dev_type;
2742 	q_depth = dev_type = k = 0;
2743 	for (i = 0; i < z90crypt.max_count; i++) {
2744 		hd_stat = HD_NOT_THERE;
/* Reset the per-device domain results before probing 16 domains. */
2745 		for (j = 0; j <= 15; cdx_array[j++] = -1);
2747 		for (j = 0; j <= 15; j++) {
2748 			hd_stat = query_online(i, j, MAX_RESET,
2749 					       &q_depth, &dev_type);
2750 			if (hd_stat == HD_TSQ_EXCEPTION) {
2751 				z90crypt.terminating = 1;
2752 				PRINTKC("exception taken!\n");
2755 			if (hd_stat == HD_ONLINE) {
2758 					*correct_cdx_found = 1;
2763 		if ((*correct_cdx_found == 1) || (k != 0))
2765 		if (z90crypt.terminating)
/*
 * NOTE(review): lines elided.  Determines the crypto domain index: scans
 * hardware, and if exactly one responding domain is found, adopts it (or
 * validates it against a user-specified index).  Multiple candidate
 * domains yield Z90C_AMBIGUOUS_DOMAIN; a mismatch with the specified index
 * yields Z90C_INCORRECT_DOMAIN.
 */
2772 probe_crypto_domain(int *cdx_p)
2775 	int correct_cdx_found, k;
2777 	correct_cdx_found = 0;
2778 	k = helper_scan_devices(cdx_array, cdx_p, &correct_cdx_found);
2780 	if (z90crypt.terminating)
2781 		return TSQ_FATAL_ERROR;
2783 	if (correct_cdx_found)
/* No domain specified yet (or none established): adopt the one we found. */
2792 	if ((*cdx_p == -1) || !z90crypt.domain_established) {
2793 		*cdx_p = cdx_array[0];
2796 	if (*cdx_p != cdx_array[0]) {
2797 		PRINTK("incorrect domain: specified = %d, found = %d\n",
2798 		       *cdx_p, cdx_array[0]);
2799 		return Z90C_INCORRECT_DOMAIN;
2803 	return Z90C_AMBIGUOUS_DOMAIN;
/*
 * NOTE(review): lines elided.  Periodic reconfiguration: (re)probes the
 * domain if not yet established, rescans hardware, and diffs the new
 * status mask against the stored one — destroying devices that went
 * offline (or were marked disabled) and creating ones that came online —
 * then refreshes the global and per-type index arrays under queuespinlock.
 */
2807 refresh_z90crypt(int *cdx_p)
2810 	struct status local_mask;
2811 	struct device *devPtr;
2812 	unsigned char oldStat, newStat;
2813 	int return_unchanged;
/* Structure-size check guards against a mismatched z90crypt root. */
2815 	if (z90crypt.len != sizeof(z90crypt))
2817 	if (z90crypt.terminating)
2818 		return TSQ_FATAL_ERROR;
2820 	if (!z90crypt.hdware_info->hdware_mask.st_count &&
2821 	    !z90crypt.domain_established)
2822 		rv = probe_crypto_domain(cdx_p);
2823 	if (z90crypt.terminating)
2824 		return TSQ_FATAL_ERROR;
2827 	case Z90C_AMBIGUOUS_DOMAIN:
2828 		PRINTK("ambiguous domain detected\n");
2830 	case Z90C_INCORRECT_DOMAIN:
2831 		PRINTK("incorrect domain specified\n");
2834 		PRINTK("probe domain returned %d\n", rv);
2840 		z90crypt.cdx = *cdx_p;
2841 		z90crypt.domain_established = 1;
2843 	rv = find_crypto_devices(&local_mask);
2845 		PRINTK("find crypto devices returned %d\n", rv);
/* If the mask is unchanged AND nothing is disabled, nothing to do. */
2848 	if (!memcmp(&local_mask, &z90crypt.hdware_info->hdware_mask,
2849 		    sizeof(struct status))) {
2850 		return_unchanged = 1;
2851 		for (i = 0; i < Z90CRYPT_NUM_TYPES; i++) {
2853 			 * Check for disabled cards. If any device is marked
2854 			 * disabled, destroy it.
2857 			     j < z90crypt.hdware_info->type_mask[i].st_count;
2859 				indx = z90crypt.hdware_info->type_x_addr[i].
2861 				devPtr = z90crypt.device_p[indx];
2862 				if (devPtr && devPtr->disabled) {
2863 					local_mask.st_mask[indx] = HD_NOT_THERE;
2864 					return_unchanged = 0;
2868 		if (return_unchanged == 1)
2872 	spin_lock_irq(&queuespinlock);
2873 	for (i = 0; i < z90crypt.max_count; i++) {
2874 		oldStat = z90crypt.hdware_info->hdware_mask.st_mask[i];
2875 		newStat = local_mask.st_mask[i];
2876 		if ((oldStat == HD_ONLINE) && (newStat != HD_ONLINE))
2877 			destroy_crypto_device(i);
2878 		else if ((oldStat != HD_ONLINE) && (newStat == HD_ONLINE)) {
2879 			rv = create_crypto_device(i);
2880 			if (rv >= REC_FATAL_ERROR)
/* Creation failed: treat the slot as absent for this cycle. */
2883 				local_mask.st_mask[i] = HD_NOT_THERE;
2884 				local_mask.st_count--;
2888 	memcpy(z90crypt.hdware_info->hdware_mask.st_mask, local_mask.st_mask,
2889 	       sizeof(local_mask.st_mask));
2890 	z90crypt.hdware_info->hdware_mask.st_count = local_mask.st_count;
2891 	z90crypt.hdware_info->hdware_mask.disabled_count =
2892 						      local_mask.disabled_count;
2893 	refresh_index_array(&z90crypt.mask, &z90crypt.overall_device_x);
2894 	for (i = 0; i < Z90CRYPT_NUM_TYPES; i++)
2895 		refresh_index_array(&(z90crypt.hdware_info->type_mask[i]),
2896 				    &(z90crypt.hdware_info->type_x_addr[i]));
2897 	spin_unlock_irq(&queuespinlock);
/*
 * find_crypto_devices
 *
 * Queries every possible device slot (0..max_count-1) via query_online
 * and fills *deviceMask with the per-slot hardware status.  For each
 * online device it also records the reported queue depth and device
 * type in the global arrays.  Returns TSQ_FATAL_ERROR if the query
 * raises an exception (and sets z90crypt.terminating).
 *
 * NOTE(review): listing is incomplete — the return type, some braces
 * and the final return are not visible; the condition guarding the
 * TSQ_FATAL_ERROR at original line 2926 is also missing.
 */
2903 find_crypto_devices(struct status *deviceMask)
2905 int i, q_depth, dev_type;
2906 enum hdstat hd_stat;
2908 deviceMask->st_count = 0;
2909 deviceMask->disabled_count = 0;
2910 deviceMask->user_disabled_count = 0;
2912 for (i = 0; i < z90crypt.max_count; i++) {
/* query_online also reports queue depth and device type by reference
 * (the argument line carrying &dev_type is missing from this listing). */
2913 hd_stat = query_online(i, z90crypt.cdx, MAX_RESET, &q_depth,
/* A TSQ exception is unrecoverable: flag global termination. */
2915 if (hd_stat == HD_TSQ_EXCEPTION) {
2916 z90crypt.terminating = 1;
2917 PRINTKC("Exception during probe for crypto devices\n");
2918 return TSQ_FATAL_ERROR;
2920 deviceMask->st_mask[i] = hd_stat;
2921 if (hd_stat == HD_ONLINE) {
2922 PDEBUG("Got an online crypto!: %d\n", i);
2923 PDEBUG("Got a queue depth of %d\n", q_depth);
2924 PDEBUG("Got a device type of %d\n", dev_type);
/* NOTE(review): the condition for this error return (original line
 * 2925, presumably a q_depth sanity check) is missing here. */
2926 return TSQ_FATAL_ERROR;
2927 deviceMask->st_count++;
2928 z90crypt.q_depth_array[i] = q_depth;
2929 z90crypt.dev_type_array[i] = dev_type;
/*
 * refresh_index_array
 *
 * Rebuilds index_array->device_index as the list of slot indices whose
 * status in status_str->st_mask is DEV_ONLINE, stopping once all
 * st_count online devices have been found or all slots scanned.
 *
 * NOTE(review): the loop opening (do {) and the declarations of i,
 * count and stat are on lines missing from this listing.
 */
2937 refresh_index_array(struct status *status_str, struct device_x *index_array)
2945 stat = status_str->st_mask[++i];
2946 if (stat == DEV_ONLINE)
2947 index_array->device_index[count++] = i;
2948 } while ((i < Z90CRYPT_NUM_DEVS) && (count < status_str->st_count));
/*
 * create_crypto_device
 *
 * Allocates and initializes the per-device structure for slot 'index'
 * (device struct plus one int per queue entry, plus a response buffer),
 * resets the device, probes its type if unknown (NILDEV), records the
 * type in hdware_info->device_type_array (1=PCICA, 2=PCICC, 3=PCIXCC,
 * -1 otherwise), and if the device is online updates both the overall
 * mask and the per-type mask/counters.
 *
 * NOTE(review): listing is incomplete — return type, deviceType
 * declaration, several if/else and return lines, and the kfree(dev_ptr)
 * cleanup paths are not fully visible.  Allocation uses GFP_ATOMIC,
 * consistent with being called under queuespinlock in refresh_z90crypt.
 */
2954 create_crypto_device(int index)
2956 int rv, devstat, total_size;
2957 struct device *dev_ptr;
2958 struct status *type_str_p;
2961 dev_ptr = z90crypt.device_p[index];
/* Device struct carries a trailing array of one int per queue slot. */
2963 total_size = sizeof(struct device) +
2964 z90crypt.q_depth_array[index] * sizeof(int);
2966 dev_ptr = (struct device *) kmalloc(total_size, GFP_ATOMIC);
2968 PRINTK("kmalloc device %d failed\n", index);
2971 memset(dev_ptr, 0, total_size);
2972 dev_ptr->dev_resp_p = kmalloc(MAX_RESPONSE_SIZE, GFP_ATOMIC);
2973 if (!dev_ptr->dev_resp_p) {
2975 PRINTK("kmalloc device %d rec buffer failed\n", index);
2978 dev_ptr->dev_resp_l = MAX_RESPONSE_SIZE;
2979 INIT_LIST_HEAD(&(dev_ptr->dev_caller_list));
/* Put the hardware into a known state before first use. */
2982 devstat = reset_device(index, z90crypt.cdx, MAX_RESET);
2983 if (devstat == DEV_RSQ_EXCEPTION) {
2984 PRINTK("exception during reset device %d\n", index);
2985 kfree(dev_ptr->dev_resp_p);
2987 return RSQ_FATAL_ERROR;
2989 if (devstat == DEV_ONLINE) {
2990 dev_ptr->dev_self_x = index;
2991 dev_ptr->dev_type = z90crypt.dev_type_array[index];
/* Type unknown from the query: send a probe message to classify it. */
2992 if (dev_ptr->dev_type == NILDEV) {
2993 rv = probe_device_type(dev_ptr);
2995 PRINTK("rv = %d from probe_device_type %d\n",
2997 kfree(dev_ptr->dev_resp_p);
3002 deviceType = dev_ptr->dev_type;
3003 z90crypt.dev_type_array[index] = deviceType;
/* Map the device type to the small codes used in device_type_array. */
3004 if (deviceType == PCICA)
3005 z90crypt.hdware_info->device_type_array[index] = 1;
3006 else if (deviceType == PCICC)
3007 z90crypt.hdware_info->device_type_array[index] = 2;
3008 else if (deviceType == PCIXCC)
3009 z90crypt.hdware_info->device_type_array[index] = 3;
3011 z90crypt.hdware_info->device_type_array[index] = -1;
3015 * 'q_depth' returned by the hardware is one less than
3018 dev_ptr->dev_q_depth = z90crypt.q_depth_array[index];
3019 dev_ptr->dev_type = z90crypt.dev_type_array[index];
3020 dev_ptr->dev_stat = devstat;
3021 dev_ptr->disabled = 0;
3022 z90crypt.device_p[index] = dev_ptr;
/* Online device: account for it in the overall and per-type masks. */
3024 if (devstat == DEV_ONLINE) {
3025 if (z90crypt.mask.st_mask[index] != DEV_ONLINE) {
3026 z90crypt.mask.st_mask[index] = DEV_ONLINE;
3027 z90crypt.mask.st_count++;
3029 deviceType = dev_ptr->dev_type;
3030 type_str_p = &z90crypt.hdware_info->type_mask[deviceType];
3031 if (type_str_p->st_mask[index] != DEV_ONLINE) {
3032 type_str_p->st_mask[index] = DEV_ONLINE;
3033 type_str_p->st_count++;
/*
 * destroy_crypto_device
 *
 * Tears down the per-device structure for slot 'index': frees its
 * response buffer, clears z90crypt.device_p[index], and removes the
 * device from the per-type mask (adjusting disabled_count if it was
 * disabled) and from the overall mask.
 *
 * NOTE(review): listing is incomplete — the freeing of dev_ptr itself
 * (original lines 3054-3058) is not visible here; presumably the device
 * struct is kfree'd there.  Return type is also not visible.
 */
3041 destroy_crypto_device(int index)
3043 struct device *dev_ptr;
3044 int t, disabledFlag;
3046 dev_ptr = z90crypt.device_p[index];
3048 /* remember device type; get rid of device struct */
3050 disabledFlag = dev_ptr->disabled;
3051 t = dev_ptr->dev_type;
3052 if (dev_ptr->dev_resp_p)
3053 kfree(dev_ptr->dev_resp_p)
3059 z90crypt.device_p[index] = 0;
3061 /* if the type is valid, remove the device from the type_mask */
3062 if ((t != -1) && z90crypt.hdware_info->type_mask[t].st_mask[index]) {
3063 z90crypt.hdware_info->type_mask[t].st_mask[index] = 0x00;
3064 z90crypt.hdware_info->type_mask[t].st_count--;
3065 if (disabledFlag == 1)
3066 z90crypt.hdware_info->type_mask[t].disabled_count--;
/* Mark the slot gone in the overall mask and drop the online count. */
3068 if (z90crypt.mask.st_mask[index] != DEV_GONE) {
3069 z90crypt.mask.st_mask[index] = DEV_GONE;
3070 z90crypt.mask.st_count--;
3072 z90crypt.hdware_info->device_type_array[index] = 0;
/*
 * destroy_z90crypt
 *
 * Full teardown of the global state: destroys every remaining device,
 * frees the hardware-info structure, then zeroes the whole z90crypt
 * global so a subsequent init starts from a clean slate.
 *
 * NOTE(review): the return type and the declaration of 'i' are on lines
 * missing from this listing.
 */
3078 destroy_z90crypt(void)
3081 for (i = 0; i < z90crypt.max_count; i++)
3082 if (z90crypt.device_p[i])
3083 destroy_crypto_device(i);
3084 if (z90crypt.hdware_info)
3085 kfree((void *)z90crypt.hdware_info);
3086 memset((void *)&z90crypt, 0, sizeof(z90crypt));
/*
 * Canned probe message used by probe_device_type() below: it is copied
 * and sent to a device of unknown type, and the first bytes of the
 * response are inspected (0x00 0x86 -> PCICC, otherwise PCICA).
 *
 * NOTE(review): the closing "};" of this initializer is on a line
 * missing from this listing.
 */
3089 static unsigned char static_testmsg[] = {
3090 0x00,0x00,0x00,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x00,0x06,0x00,0x00,
3091 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x58,
3092 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x43,0x43,
3093 0x41,0x2d,0x41,0x50,0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,0x00,0x00,0x00,0x00,
3094 0x50,0x4b,0x00,0x00,0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3095 0x00,0x00,0x00,0x00,0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3096 0x00,0x00,0x00,0x00,0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x32,
3097 0x01,0x00,0xa0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3098 0xb8,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3099 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3100 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3101 0x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3102 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x49,0x43,0x53,0x46,
3103 0x20,0x20,0x20,0x20,0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,0x2d,0x31,0x2e,0x32,
3104 0x37,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
3105 0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
3106 0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
3107 0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,0x88,0x1e,0x00,0x00,
3108 0x57,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,0x03,0x02,0x00,0x00,
3109 0x40,0x01,0x00,0x01,0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,0xf6,0xd2,0x7b,0x58,
3110 0x4b,0xf9,0x28,0x68,0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,0x63,0x42,0xef,0xf8,
3111 0xfd,0xa4,0xf8,0xb0,0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,0x53,0x8c,0x6f,0x4e,
3112 0x72,0x8f,0x6c,0x04,0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,0xf7,0xdd,0xfd,0x4f,
3113 0x11,0x36,0x95,0x5d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
/*
 * probe_device_type
 *
 * Classifies a device of unknown type: sends a copy of static_testmsg
 * to the AP queue for the device, polls for the reply (up to 6
 * attempts), then inspects the first two response bytes — 0x00 0x86
 * selects PCICC, otherwise PCICA.
 *
 * NOTE(review): listing is incomplete — the return type, a mdelay/poll
 * between receive attempts, the switch headers on dv, several case
 * labels, break statements and the final return are on missing lines.
 */
3117 probe_device_type(struct device *devPtr)
3119 int rv, dv, i, index, length;
3120 unsigned char psmid[8];
3121 static unsigned char loc_testmsg[384];
3123 index = devPtr->dev_self_x;
/* Work on a local copy so the canned template is never modified. */
3126 memcpy(loc_testmsg, static_testmsg, sizeof(static_testmsg));
3127 length = sizeof(static_testmsg) - 24;
3128 /* the -24 allows for the header */
3129 dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
3131 PDEBUG("dv returned by send during probe: %d\n", dv);
3132 if (dv == DEV_SEN_EXCEPTION) {
3133 rv = SEN_FATAL_ERROR;
3134 PRINTKC("exception in send to AP %d\n", index);
3137 PDEBUG("return value from send_to_AP: %d\n", rv);
3140 PDEBUG("dev %d not available\n", index);
/* NOTE(review): the cases below belong to a switch on dv whose header
 * (and some case labels) are missing from this listing. */
3150 rv = SEN_FATAL_ERROR;
3152 case DEV_BAD_MESSAGE:
3153 rv = SEN_USER_ERROR;
3155 case DEV_QUEUE_FULL:
3156 rv = SEN_QUEUE_FULL;
3159 PRINTK("unknown dv=%d for dev %d\n", dv, index);
/* Poll up to 6 times for the reply to the probe message. */
3168 for (i = 0; i < 6; i++) {
3170 dv = receive_from_AP(index, z90crypt.cdx,
3172 devPtr->dev_resp_p, psmid);
3173 PDEBUG("dv returned by DQ = %d\n", dv);
3174 if (dv == DEV_REC_EXCEPTION) {
3175 rv = REC_FATAL_ERROR;
3176 PRINTKC("exception in dequeue %d\n",
3190 case DEV_BAD_MESSAGE:
3193 rv = REC_NO_RESPONSE;
/* Anything other than success or "no work yet" ends the retry loop. */
3196 if ((rv != 0) && (rv != REC_NO_WORK))
/* First two response bytes 0x00 0x86 identify a PCICC; else PCICA. */
3203 rv = (devPtr->dev_resp_p[0] == 0x00) &&
3204 (devPtr->dev_resp_p[1] == 0x86);
3206 devPtr->dev_type = PCICC;
3208 devPtr->dev_type = PCICA;
3211 /* In a general error case, the card is not marked online */
#ifdef Z90CRYPT_USE_HOTPLUG
/*
 * z90crypt_hotplug_event
 *
 * Notifies userspace of device-node add/remove by invoking the
 * /sbin/hotplug helper (hotplug_path) with MAJOR=, MINOR= and ACTION=
 * environment variables.  Compiled to a no-op body unless
 * CONFIG_HOTPLUG is set.
 *
 * NOTE(review): listing is incomplete — the declarations of argv/envp/
 * major/minor buffers, the switch on 'action', the #endif and the
 * closing brace are on missing lines.
 */
3217 z90crypt_hotplug_event(int dev_major, int dev_minor, int action)
3219 #ifdef CONFIG_HOTPLUG
3225 sprintf(major, "MAJOR=%d", dev_major);
3226 sprintf(minor, "MINOR=%d", dev_minor);
3228 argv[0] = hotplug_path;
3229 argv[1] = "z90crypt";
3233 envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
3236 case Z90CRYPT_HOTPLUG_ADD:
3237 envp[2] = "ACTION=add";
3239 case Z90CRYPT_HOTPLUG_REMOVE:
3240 envp[2] = "ACTION=remove";
/* Fire-and-forget: the helper's exit status is not checked. */
3249 call_usermodehelper(argv[0], argv, envp, 0);
/* Register the module entry/exit points with the kernel. */
3254 module_init(z90crypt_init_module);
3255 module_exit(z90crypt_cleanup_module);