2 * csr1212.c -- IEEE 1212 Control and Status Register support for Linux
4 * Copyright (C) 2003 Francois Retief <fgretief@sun.ac.za>
5 * Steve Kinneberg <kinnebergsteve@acmsystems.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
21 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
26 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
27 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * - Verify interface consistency: i.e., public functions that take a size
33 * parameter expect size to be in bytes.
34 * - Convenience functions for reading a block of data from a given offset.
44 /* Permitted key type for each key id */
/* One bit per keyval type; a set bit means that (key id, type) pairing is
 * legal per IEEE 1212.  csr1212_new_keyval() consults this table before
 * allocating (see the `key < 0x30` guard there). */
45 #define __I (1 << CSR1212_KV_TYPE_IMMEDIATE)
46 #define __C (1 << CSR1212_KV_TYPE_CSR_OFFSET)
47 #define __D (1 << CSR1212_KV_TYPE_DIRECTORY)
48 #define __L (1 << CSR1212_KV_TYPE_LEAF)
/* Indexed by key id (0x00..0x2f); 0 marks reserved ids that accept no type. */
49 static const u_int8_t csr1212_key_id_type_map[0x30] = {
51 __D | __L, /* Descriptor */
52 __I | __D | __L, /* Bus_Dependent_Info */
53 __I | __D | __L, /* Vendor */
54 __I, /* Hardware_Version */
56 __D | __L, /* Module */
57 0, 0, 0, 0, /* Reserved */
58 __I, /* Node_Capabilities */
60 0, 0, 0, /* Reserved */
62 __I, /* Specifier_ID */
64 __I | __C | __D | __L, /* Dependent_Info */
65 __L, /* Unit_Location */
71 __L, /* Extended_ROM */
72 __I, /* Extended_Key_Specifier_ID */
73 __I, /* Extended_Key */
74 __I | __C | __D | __L, /* Extended_Data */
75 __L, /* Modifiable_Descriptor */
76 __I, /* Directory_ID */
/* Quadlet (32-bit word) <-> byte conversions; bytes_to_quads rounds up so a
 * partial trailing quadlet still counts. */
85 #define quads_to_bytes(_q) ((_q) * sizeof(u_int32_t))
86 #define bytes_to_quads(_b) (((_b) + sizeof(u_int32_t) - 1) / sizeof(u_int32_t))
88 static inline void free_keyval(struct csr1212_keyval *kv)
90 if (kv->key.type == CSR1212_KV_TYPE_LEAF)
91 CSR1212_FREE(kv->value.leaf.data);
/* Compute the IEEE 1212 CRC-16 over `length` quadlets of big-endian data.
 * The result is returned already converted to big-endian byte order so it
 * can be stored directly into a ROM image. */
static u_int16_t csr1212_crc16(const u_int32_t *buffer, size_t length)
{
	int shift;
	u_int32_t data;
	u_int16_t sum, crc = 0;

	for (; length; length--) {
		data = CSR1212_BE32_TO_CPU(*buffer);
		buffer++;

		/* Fold in the quadlet one nibble at a time, MSB first. */
		for (shift = 28; shift >= 0; shift -= 4) {
			sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
			crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
		}
		crc &= 0xffff;
	}

	return CSR1212_CPU_TO_BE16(crc);
}
/* Microsoft computes the CRC with the bytes in reverse order.  Therefore we
 * have a special version of the CRC algorithm to account for their buggy
 * software.  Identical to csr1212_crc16() except the quadlet is read with a
 * little-endian conversion. */
static u_int16_t csr1212_msft_crc16(const u_int32_t *buffer, size_t length)
{
	int shift;
	u_int32_t data;
	u_int16_t sum, crc = 0;

	for (; length; length--) {
		data = CSR1212_LE32_TO_CPU(*buffer);
		buffer++;

		for (shift = 28; shift >= 0; shift -= 4) {
			sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
			crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
		}
		crc &= 0xffff;
	}

	return CSR1212_CPU_TO_BE16(crc);
}
139 static inline struct csr1212_dentry *csr1212_find_keyval(struct csr1212_keyval *dir,
140 struct csr1212_keyval *kv)
142 struct csr1212_dentry *pos;
144 for (pos = dir->value.directory.dentries_head;
145 pos != NULL; pos = pos->next) {
153 static inline struct csr1212_keyval *csr1212_find_keyval_offset(struct csr1212_keyval *kv_list,
156 struct csr1212_keyval *kv;
158 for (kv = kv_list; kv != NULL; kv = kv->next) {
159 if (kv->offset == offset)
166 /* Creation Routines */
167 struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
168 size_t bus_info_size, void *private)
170 struct csr1212_csr *csr;
172 csr = CSR1212_MALLOC(sizeof(*csr));
177 csr1212_rom_cache_malloc(CSR1212_CONFIG_ROM_SPACE_OFFSET,
178 CSR1212_CONFIG_ROM_SPACE_SIZE);
179 if (!csr->cache_head) {
184 /* The keyval key id is not used for the root node, but a valid key id
185 * that can be used for a directory needs to be passed to
186 * csr1212_new_directory(). */
187 csr->root_kv = csr1212_new_directory(CSR1212_KV_ID_VENDOR);
189 CSR1212_FREE(csr->cache_head);
194 csr->bus_info_data = csr->cache_head->data;
195 csr->bus_info_len = bus_info_size;
196 csr->crc_len = bus_info_size;
198 csr->private = private;
199 csr->cache_tail = csr->cache_head;
206 void csr1212_init_local_csr(struct csr1212_csr *csr,
207 const u_int32_t *bus_info_data, int max_rom)
209 static const int mr_map[] = { 4, 64, 1024, 0 };
211 csr->max_rom = mr_map[max_rom];
212 memcpy(csr->bus_info_data, bus_info_data, csr->bus_info_len);
216 static struct csr1212_keyval *csr1212_new_keyval(u_int8_t type, u_int8_t key)
218 struct csr1212_keyval *kv;
220 if (key < 0x30 && ((csr1212_key_id_type_map[key] & (1 << type)) == 0))
223 kv = CSR1212_MALLOC(sizeof(*kv));
230 kv->associate = NULL;
240 struct csr1212_keyval *csr1212_new_immediate(u_int8_t key, u_int32_t value)
242 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_IMMEDIATE, key);
247 kv->value.immediate = value;
252 struct csr1212_keyval *csr1212_new_leaf(u_int8_t key, const void *data, size_t data_len)
254 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, key);
260 kv->value.leaf.data = CSR1212_MALLOC(data_len);
261 if (!kv->value.leaf.data) {
267 memcpy(kv->value.leaf.data, data, data_len);
269 kv->value.leaf.data = NULL;
272 kv->value.leaf.len = bytes_to_quads(data_len);
279 struct csr1212_keyval *csr1212_new_csr_offset(u_int8_t key, u_int32_t csr_offset)
281 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_CSR_OFFSET, key);
286 kv->value.csr_offset = csr_offset;
293 struct csr1212_keyval *csr1212_new_directory(u_int8_t key)
295 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_DIRECTORY, key);
300 kv->value.directory.len = 0;
302 kv->value.directory.dentries_head = NULL;
303 kv->value.directory.dentries_tail = NULL;
308 int csr1212_associate_keyval(struct csr1212_keyval *kv,
309 struct csr1212_keyval *associate)
311 if (!kv || !associate)
312 return CSR1212_EINVAL;
314 if (kv->key.id == CSR1212_KV_ID_DESCRIPTOR ||
315 (associate->key.id != CSR1212_KV_ID_DESCRIPTOR &&
316 associate->key.id != CSR1212_KV_ID_DEPENDENT_INFO &&
317 associate->key.id != CSR1212_KV_ID_EXTENDED_KEY &&
318 associate->key.id != CSR1212_KV_ID_EXTENDED_DATA &&
319 associate->key.id < 0x30))
320 return CSR1212_EINVAL;
322 if (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID &&
323 associate->key.id != CSR1212_KV_ID_EXTENDED_KEY)
324 return CSR1212_EINVAL;
326 if (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
327 associate->key.id != CSR1212_KV_ID_EXTENDED_DATA)
328 return CSR1212_EINVAL;
330 if (associate->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
331 kv->key.id != CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID)
332 return CSR1212_EINVAL;
334 if (associate->key.id == CSR1212_KV_ID_EXTENDED_DATA &&
335 kv->key.id != CSR1212_KV_ID_EXTENDED_KEY)
336 return CSR1212_EINVAL;
339 csr1212_release_keyval(kv->associate);
342 kv->associate = associate;
344 return CSR1212_SUCCESS;
347 int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
348 struct csr1212_keyval *kv)
350 struct csr1212_dentry *dentry;
352 if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY)
353 return CSR1212_EINVAL;
355 dentry = CSR1212_MALLOC(sizeof(*dentry));
357 return CSR1212_ENOMEM;
364 dentry->prev = dir->value.directory.dentries_tail;
366 if (!dir->value.directory.dentries_head)
367 dir->value.directory.dentries_head = dentry;
369 if (dir->value.directory.dentries_tail)
370 dir->value.directory.dentries_tail->next = dentry;
371 dir->value.directory.dentries_tail = dentry;
373 return CSR1212_SUCCESS;
376 struct csr1212_keyval *csr1212_new_extended_immediate(u_int32_t spec, u_int32_t key,
379 struct csr1212_keyval *kvs, *kvk, *kvv;
381 kvs = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID, spec);
382 kvk = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY, key);
383 kvv = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_DATA, value);
385 if (!kvs || !kvk || !kvv) {
395 /* Don't keep a local reference to the extended key or value. */
399 csr1212_associate_keyval(kvk, kvv);
400 csr1212_associate_keyval(kvs, kvk);
405 struct csr1212_keyval *csr1212_new_extended_leaf(u_int32_t spec, u_int32_t key,
406 const void *data, size_t data_len)
408 struct csr1212_keyval *kvs, *kvk, *kvv;
410 kvs = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID, spec);
411 kvk = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY, key);
412 kvv = csr1212_new_leaf(CSR1212_KV_ID_EXTENDED_DATA, data, data_len);
414 if (!kvs || !kvk || !kvv) {
424 /* Don't keep a local reference to the extended key or value. */
428 csr1212_associate_keyval(kvk, kvv);
429 csr1212_associate_keyval(kvs, kvk);
434 struct csr1212_keyval *csr1212_new_descriptor_leaf(u_int8_t dtype, u_int32_t specifier_id,
435 const void *data, size_t data_len)
437 struct csr1212_keyval *kv;
439 kv = csr1212_new_leaf(CSR1212_KV_ID_DESCRIPTOR, NULL,
440 data_len + CSR1212_DESCRIPTOR_LEAF_OVERHEAD);
444 CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype);
445 CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id);
448 memcpy(CSR1212_DESCRIPTOR_LEAF_DATA(kv), data, data_len);
455 struct csr1212_keyval *csr1212_new_textual_descriptor_leaf(u_int8_t cwidth,
461 struct csr1212_keyval *kv;
464 kv = csr1212_new_descriptor_leaf(0, 0, NULL, data_len +
465 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD);
469 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_WIDTH(kv, cwidth);
470 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_CHAR_SET(kv, cset);
471 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language);
473 lstr = (char*)CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv);
475 /* make sure last quadlet is zeroed out */
476 *((u_int32_t*)&(lstr[(data_len - 1) & ~0x3])) = 0;
478 /* don't copy the NUL terminator */
479 memcpy(lstr, data, data_len);
484 static int csr1212_check_minimal_ascii(const char *s)
486 static const char minimal_ascii_table[] = {
487 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
488 0x00, 0x00, 0x0a, 0x00, 0x0C, 0x0D, 0x00, 0x00,
489 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
490 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
491 0x20, 0x21, 0x22, 0x00, 0x00, 0x25, 0x26, 0x27,
492 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
493 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
494 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
495 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
496 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
497 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
498 0x58, 0x59, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x5f,
499 0x00, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
500 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
501 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
502 0x78, 0x79, 0x7a, 0x00, 0x00, 0x00, 0x00, 0x00,
505 if (minimal_ascii_table[*s & 0x7F] != *s)
506 return -1; /* failed */
508 /* String conforms to minimal-ascii, as specified by IEEE 1212,
/* Create a minimal-ASCII textual descriptor leaf for the NUL-terminated
 * string `s`.  Returns NULL if the string contains characters outside the
 * minimal-ASCII repertoire or on allocation failure. */
struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s)
{
	/* Check if string conforms to minimal_ascii format */
	if (csr1212_check_minimal_ascii(s))
		return NULL;

	/* IEEE 1212, par. 7.5.4.1 Textual descriptors (minimal ASCII) */
	return csr1212_new_textual_descriptor_leaf(0, 0, 0, s, strlen(s));
}
523 struct csr1212_keyval *csr1212_new_icon_descriptor_leaf(u_int32_t version,
524 u_int8_t palette_depth,
525 u_int8_t color_space,
532 static const int pd[4] = { 0, 4, 16, 256 };
533 static const int cs[16] = { 4, 2 };
534 struct csr1212_keyval *kv;
535 int palette_size = pd[palette_depth] * cs[color_space];
536 int pixel_size = (hscan * vscan + 3) & ~0x3;
538 if ((palette_depth && !palette) || !pixels)
541 kv = csr1212_new_descriptor_leaf(1, 0, NULL,
542 palette_size + pixel_size +
543 CSR1212_ICON_DESCRIPTOR_LEAF_OVERHEAD);
547 CSR1212_ICON_DESCRIPTOR_LEAF_SET_VERSION(kv, version);
548 CSR1212_ICON_DESCRIPTOR_LEAF_SET_PALETTE_DEPTH(kv, palette_depth);
549 CSR1212_ICON_DESCRIPTOR_LEAF_SET_COLOR_SPACE(kv, color_space);
550 CSR1212_ICON_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language);
551 CSR1212_ICON_DESCRIPTOR_LEAF_SET_HSCAN(kv, hscan);
552 CSR1212_ICON_DESCRIPTOR_LEAF_SET_VSCAN(kv, vscan);
555 memcpy(CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE(kv), palette,
558 memcpy(CSR1212_ICON_DESCRIPTOR_LEAF_PIXELS(kv), pixels, pixel_size);
563 struct csr1212_keyval *csr1212_new_modifiable_descriptor_leaf(u_int16_t max_size,
566 struct csr1212_keyval *kv;
568 /* IEEE 1212, par. 7.5.4.3 Modifiable descriptors */
569 kv = csr1212_new_leaf(CSR1212_KV_ID_MODIFIABLE_DESCRIPTOR, NULL, sizeof(u_int64_t));
573 CSR1212_MODIFIABLE_DESCRIPTOR_SET_MAX_SIZE(kv, max_size);
574 CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_HI(kv, address);
575 CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_LO(kv, address);
580 static int csr1212_check_keyword(const char *s)
584 if (('A' <= *s) && (*s <= 'Z'))
586 if (('0' <= *s) && (*s <= '9'))
591 return -1; /* failed */
593 /* String conforms to keyword, as specified by IEEE 1212,
595 return CSR1212_SUCCESS;
598 struct csr1212_keyval *csr1212_new_keyword_leaf(int strc, const char *strv[])
600 struct csr1212_keyval *kv;
604 /* Check all keywords to see if they conform to restrictions:
605 * Only the following characters is allowed ['A'..'Z','0'..'9','-']
606 * Each word is zero-terminated.
607 * Also calculate the total length of the keywords.
609 for (i = 0; i < strc; i++) {
610 if (!strv[i] || csr1212_check_keyword(strv[i])) {
613 data_len += strlen(strv[i]) + 1; /* Add zero-termination char. */
616 /* IEEE 1212, par. 7.6.5 Keyword leaves */
617 kv = csr1212_new_leaf(CSR1212_KV_ID_KEYWORD, NULL, data_len);
621 buffer = (char *)kv->value.leaf.data;
623 /* make sure last quadlet is zeroed out */
624 *((u_int32_t*)&(buffer[(data_len - 1) & ~0x3])) = 0;
626 /* Copy keyword(s) into leaf data buffer */
627 for (i = 0; i < strc; i++) {
628 int len = strlen(strv[i]) + 1;
629 memcpy(buffer, strv[i], len);
636 /* Destruction Routines */
638 void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
639 struct csr1212_keyval *kv)
641 struct csr1212_dentry *dentry;
643 if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY)
646 dentry = csr1212_find_keyval(dir, kv);
652 dentry->prev->next = dentry->next;
654 dentry->next->prev = dentry->prev;
655 if (dir->value.directory.dentries_head == dentry)
656 dir->value.directory.dentries_head = dentry->next;
657 if (dir->value.directory.dentries_tail == dentry)
658 dir->value.directory.dentries_tail = dentry->prev;
660 CSR1212_FREE(dentry);
662 csr1212_release_keyval(kv);
666 void csr1212_disassociate_keyval(struct csr1212_keyval *kv)
669 csr1212_release_keyval(kv->associate);
672 kv->associate = NULL;
676 /* This function is used to free the memory taken by a keyval. If the given
677 * keyval is a directory type, then any keyvals contained in that directory
678 * will be destroyed as well if their respective refcnts are 0. By means of
679 * list manipulation, this routine will descend a directory structure in a
680 * non-recursive manner. */
681 void _csr1212_destroy_keyval(struct csr1212_keyval *kv)
/* `dentry` is a stack-allocated sentinel; the worklist of entries to destroy
 * grows from it via head/tail (setup elided in this view). */
683 struct csr1212_keyval *k, *a;
684 struct csr1212_dentry dentry;
685 struct csr1212_dentry *head, *tail;
705 if (k->key.type == CSR1212_KV_TYPE_DIRECTORY) {
706 /* If the current entry is a directory, then move all
707 * the entries to the destruction list. */
708 if (k->value.directory.dentries_head) {
709 tail->next = k->value.directory.dentries_head;
710 k->value.directory.dentries_head->prev = tail;
711 tail = k->value.directory.dentries_tail;
/* Free previously-visited worklist nodes, but never the on-stack sentinel
 * (&dentry) — hence the explicit != &dentry checks. */
720 if (head->prev && head->prev != &dentry) {
721 CSR1212_FREE(head->prev);
724 } else if (tail != &dentry)
/* Tear down an entire CSR: drop the root keyval tree, then (in code elided
 * from this view — note the unused-looking c/oc/cr/ocr cursors) walk and free
 * the rom-cache list and each cache's filled-region list. */
730 void csr1212_destroy_csr(struct csr1212_csr *csr)
732 struct csr1212_csr_rom_cache *c, *oc;
733 struct csr1212_cache_region *cr, *ocr;
735 csr1212_release_keyval(csr->root_kv);
755 /* CSR Image Creation */
757 static int csr1212_append_new_cache(struct csr1212_csr *csr, size_t romsize)
759 struct csr1212_csr_rom_cache *cache;
762 if (!csr || !csr->ops->allocate_addr_range ||
763 !csr->ops->release_addr)
764 return CSR1212_ENOMEM;
766 /* ROM size must be a multiple of csr->max_rom */
767 romsize = (romsize + (csr->max_rom - 1)) & ~(csr->max_rom - 1);
769 csr_addr = csr->ops->allocate_addr_range(romsize, csr->max_rom, csr->private);
770 if (csr_addr == ~0ULL) {
771 return CSR1212_ENOMEM;
773 if (csr_addr < CSR1212_REGISTER_SPACE_BASE) {
774 /* Invalid address returned from allocate_addr_range(). */
775 csr->ops->release_addr(csr_addr, csr->private);
776 return CSR1212_ENOMEM;
779 cache = csr1212_rom_cache_malloc(csr_addr - CSR1212_REGISTER_SPACE_BASE, romsize);
781 csr->ops->release_addr(csr_addr, csr->private);
782 return CSR1212_ENOMEM;
785 cache->ext_rom = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, CSR1212_KV_ID_EXTENDED_ROM);
786 if (!cache->ext_rom) {
787 csr->ops->release_addr(csr_addr, csr->private);
789 return CSR1212_ENOMEM;
792 if (csr1212_attach_keyval_to_directory(csr->root_kv, cache->ext_rom) != CSR1212_SUCCESS) {
793 csr1212_release_keyval(cache->ext_rom);
794 csr->ops->release_addr(csr_addr, csr->private);
796 return CSR1212_ENOMEM;
798 cache->ext_rom->offset = csr_addr - CSR1212_REGISTER_SPACE_BASE;
799 cache->ext_rom->value.leaf.len = 0;
801 /* Add cache to tail of cache list */
802 cache->prev = csr->cache_tail;
803 csr->cache_tail->next = cache;
804 csr->cache_tail = cache;
805 return CSR1212_SUCCESS;
808 static inline void csr1212_remove_cache(struct csr1212_csr *csr,
809 struct csr1212_csr_rom_cache *cache)
811 if (csr->cache_head == cache)
812 csr->cache_head = cache->next;
813 if (csr->cache_tail == cache)
814 csr->cache_tail = cache->prev;
817 cache->prev->next = cache->next;
819 cache->next->prev = cache->prev;
821 if (cache->ext_rom) {
822 csr1212_detach_keyval_from_directory(csr->root_kv, cache->ext_rom);
823 csr1212_release_keyval(cache->ext_rom);
/* Walk one directory's entries (and their associate chains) and splice each
 * leaf/directory keyval onto the flat layout list ending at *layout_tail.
 * Duplicate Extended_Key_Specifier_ID / Extended_Key immediates are tracked
 * so repeated values are not re-emitted (the skip branches are elided in
 * this view).  Returns the directory's entry count in quadlets — presumably
 * accumulated in code elided here; TODO confirm against the full source. */
829 static int csr1212_generate_layout_subdir(struct csr1212_keyval *dir,
830 struct csr1212_keyval **layout_tail)
832 struct csr1212_dentry *dentry;
833 struct csr1212_keyval *dkv;
834 struct csr1212_keyval *last_extkey_spec = NULL;
835 struct csr1212_keyval *last_extkey = NULL;
838 for (dentry = dir->value.directory.dentries_head; dentry;
839 dentry = dentry->next) {
840 for (dkv = dentry->kv; dkv; dkv = dkv->associate) {
841 /* Special Case: Extended Key Specifier_ID */
842 if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
843 if (last_extkey_spec == NULL) {
844 last_extkey_spec = dkv;
845 } else if (dkv->value.immediate != last_extkey_spec->value.immediate) {
846 last_extkey_spec = dkv;
850 /* Special Case: Extended Key */
851 } else if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
852 if (last_extkey == NULL) {
854 } else if (dkv->value.immediate != last_extkey->value.immediate) {
863 switch(dkv->key.type) {
865 case CSR1212_KV_TYPE_IMMEDIATE:
866 case CSR1212_KV_TYPE_CSR_OFFSET:
868 case CSR1212_KV_TYPE_LEAF:
869 case CSR1212_KV_TYPE_DIRECTORY:
870 /* Remove from list */
/* NOTE(review): the guards (if dkv->prev / dkv->next) around these two
 * unlink statements are elided in this view. */
872 dkv->prev->next = dkv->next;
874 dkv->next->prev = dkv->prev;
875 if (dkv == *layout_tail)
876 *layout_tail = dkv->prev;
878 /* Special case: Extended ROM leafs */
879 if (dkv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
880 dkv->value.leaf.len = 0; /* initialize to zero */
881 /* Don't add Extended ROM leafs in the layout list,
882 * they are handled differently. */
886 /* Add to tail of list */
888 dkv->prev = *layout_tail;
889 (*layout_tail)->next = dkv;
898 size_t csr1212_generate_layout_order(struct csr1212_keyval *kv)
900 struct csr1212_keyval *ltail = kv;
904 switch(kv->key.type) {
905 case CSR1212_KV_TYPE_LEAF:
906 /* Add 1 quadlet for crc/len field */
907 agg_size += kv->value.leaf.len + 1;
910 case CSR1212_KV_TYPE_DIRECTORY:
911 kv->value.directory.len = csr1212_generate_layout_subdir(kv, <ail);
912 /* Add 1 quadlet for crc/len field */
913 agg_size += kv->value.directory.len + 1;
918 return quads_to_bytes(agg_size);
/* Assign ROM offsets to keyvals from `start_kv` onward until `cache` is
 * full, starting `pos` bytes into the cache.  Records the span as
 * cache->layout_head/layout_tail and sets cache->len to the bytes consumed.
 * Returns the first keyval that did NOT fit (NULL presumably when all fit —
 * the return statement is elided in this view; TODO confirm). */
921 struct csr1212_keyval *csr1212_generate_positions(struct csr1212_csr_rom_cache *cache,
922 struct csr1212_keyval *start_kv,
925 struct csr1212_keyval *kv = start_kv;
926 struct csr1212_keyval *okv = start_kv;
928 int kv_len = 0, okv_len = 0;
930 cache->layout_head = kv;
932 while(kv && pos < cache->size) {
933 kv->offset = cache->offset + pos;
935 switch(kv->key.type) {
936 case CSR1212_KV_TYPE_LEAF:
937 kv_len = kv->value.leaf.len;
940 case CSR1212_KV_TYPE_DIRECTORY:
941 kv_len = kv->value.directory.len;
945 /* Should never get here */
/* +1 quadlet for the crc/len header preceding each leaf/directory body. */
949 pos += quads_to_bytes(kv_len + 1);
/* okv/okv_len track the last keyval that fully fit within the cache. */
951 if (pos <= cache->size) {
958 cache->layout_tail = okv;
959 cache->len = (okv->offset - cache->offset) + quads_to_bytes(okv_len + 1);
/* Serialize one directory's entries into `data_buffer` as big-endian
 * quadlets: 8-bit key (id+type) in the top byte, 24-bit value below.  For
 * leaf/directory entries the value is the target's offset relative to the
 * entry's own position, converted to quadlets.  Duplicate extended-key
 * specifier/key immediates are skipped, mirroring the layout pass.
 * `index` advances per emitted entry (its declaration/increment are elided
 * in this view). */
964 static void csr1212_generate_tree_subdir(struct csr1212_keyval *dir,
965 u_int32_t *data_buffer)
967 struct csr1212_dentry *dentry;
968 struct csr1212_keyval *last_extkey_spec = NULL;
969 struct csr1212_keyval *last_extkey = NULL;
972 for (dentry = dir->value.directory.dentries_head; dentry; dentry = dentry->next) {
973 struct csr1212_keyval *a;
975 for (a = dentry->kv; a; a = a->associate) {
978 /* Special Case: Extended Key Specifier_ID */
979 if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
980 if (last_extkey_spec == NULL) {
981 last_extkey_spec = a;
982 } else if (a->value.immediate != last_extkey_spec->value.immediate) {
983 last_extkey_spec = a;
987 /* Special Case: Extended Key */
988 } else if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
989 if (last_extkey == NULL) {
991 } else if (a->value.immediate != last_extkey->value.immediate) {
998 switch(a->key.type) {
999 case CSR1212_KV_TYPE_IMMEDIATE:
1000 value = a->value.immediate;
1002 case CSR1212_KV_TYPE_CSR_OFFSET:
1003 value = a->value.csr_offset;
1005 case CSR1212_KV_TYPE_LEAF:
/* value starts from a->offset (assignment elided); make it relative to this
 * entry's quadlet position within the directory. */
1007 value -= dir->offset + quads_to_bytes(1+index);
1008 value = bytes_to_quads(value);
1010 case CSR1212_KV_TYPE_DIRECTORY:
1012 value -= dir->offset + quads_to_bytes(1+index);
1013 value = bytes_to_quads(value);
1016 /* Should never get here */
1017 break; /* GDB breakpoint */
1020 value |= (a->key.id & CSR1212_KV_KEY_ID_MASK) << CSR1212_KV_KEY_SHIFT;
1021 value |= (a->key.type & CSR1212_KV_KEY_TYPE_MASK) <<
1022 (CSR1212_KV_KEY_SHIFT + CSR1212_KV_KEY_TYPE_SHIFT);
1023 data_buffer[index] = CSR1212_CPU_TO_BE32(value);
/* Write every keyval on this cache's layout list into the cache buffer:
 * leaf payloads are memcpy'd, directories are serialized via
 * csr1212_generate_tree_subdir, and each gets its length/CRC header.
 * Extended-ROM leaves are skipped for the payload copy because their data
 * already lives in (another) cache buffer. */
1029 void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache)
1031 struct csr1212_keyval *kv, *nkv;
1032 struct csr1212_keyval_img *kvi;
1034 for (kv = cache->layout_head; kv != cache->layout_tail->next; kv = nkv) {
1035 kvi = (struct csr1212_keyval_img *)
1036 (cache->data + bytes_to_quads(kv->offset - cache->offset));
1037 switch(kv->key.type) {
1039 case CSR1212_KV_TYPE_IMMEDIATE:
1040 case CSR1212_KV_TYPE_CSR_OFFSET:
1041 /* Should never get here */
1042 break; /* GDB breakpoint */
1044 case CSR1212_KV_TYPE_LEAF:
1045 /* Don't copy over Extended ROM areas, they are
1046 * already filled out! */
1047 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
1048 memcpy(kvi->data, kv->value.leaf.data,
1049 quads_to_bytes(kv->value.leaf.len));
1051 kvi->length = CSR1212_CPU_TO_BE16(kv->value.leaf.len);
1052 kvi->crc = csr1212_crc16(kvi->data, kv->value.leaf.len);
1055 case CSR1212_KV_TYPE_DIRECTORY:
1056 csr1212_generate_tree_subdir(kv, kvi->data);
1058 kvi->length = CSR1212_CPU_TO_BE16(kv->value.directory.len);
1059 kvi->crc = csr1212_crc16(kvi->data, kv->value.directory.len);
/* Build the complete Config ROM image: write the bus info block header and
 * CRC, lay out the keyval tree across the cache list (appending Extended-ROM
 * caches on demand and re-laying-out when one is added), drop unused caches,
 * then fill each cache's bytes back-to-front so Extended-ROM CRCs are
 * computed over final data.  Returns CSR1212_SUCCESS or an error code. */
1069 int csr1212_generate_csr_image(struct csr1212_csr *csr)
1071 struct csr1212_bus_info_block_img *bi;
1072 struct csr1212_csr_rom_cache *cache;
1073 struct csr1212_keyval *kv;
1079 return CSR1212_EINVAL;
1081 cache = csr->cache_head;
1083 bi = (struct csr1212_bus_info_block_img*)cache->data;
/* length fields are in quadlets, excluding the header quadlet itself. */
1085 bi->length = bytes_to_quads(csr->bus_info_len) - 1;
1086 bi->crc_length = bi->length;
1087 bi->crc = csr1212_crc16(bi->data, bi->crc_length);
1089 agg_size = csr1212_generate_layout_order(csr->root_kv);
1091 init_offset = csr->bus_info_len;
1093 for (kv = csr->root_kv, cache = csr->cache_head; kv; cache = cache->next) {
1095 /* Estimate approximate number of additional cache
1096 * regions needed (it assumes that the cache holding
1097 * the first 1K Config ROM space always exists). */
1098 int est_c = agg_size / (CSR1212_EXTENDED_ROM_SIZE -
1099 (2 * sizeof(u_int32_t))) + 1;
1101 /* Add additional cache regions, extras will be
1103 for (; est_c; est_c--) {
1104 ret = csr1212_append_new_cache(csr, CSR1212_EXTENDED_ROM_SIZE);
1105 if (ret != CSR1212_SUCCESS)
1108 /* Need to re-layout for additional cache regions */
1109 agg_size = csr1212_generate_layout_order(csr->root_kv);
1111 cache = csr->cache_head;
1112 init_offset = csr->bus_info_len;
1114 kv = csr1212_generate_positions(cache, kv, init_offset);
1115 agg_size -= cache->len;
/* Subsequent caches reserve 1 quadlet for the Extended-ROM leaf header. */
1116 init_offset = sizeof(u_int32_t);
1119 /* Remove unused, excess cache regions */
1121 struct csr1212_csr_rom_cache *oc = cache;
1123 cache = cache->next;
1124 csr1212_remove_cache(csr, oc);
1127 /* Go through the list backward so that when done, the correct CRC
1128 * will be calculated for the Extended ROM areas. */
1129 for(cache = csr->cache_tail; cache; cache = cache->prev) {
1130 /* Only Extended ROM caches should have this set. */
1131 if (cache->ext_rom) {
1134 /* Make sure the Extended ROM leaf is a multiple of
1135 * max_rom in size. */
1136 leaf_size = (cache->len + (csr->max_rom - 1)) &
1139 /* Zero out the unused ROM region */
1140 memset(cache->data + bytes_to_quads(cache->len), 0x00,
1141 leaf_size - cache->len);
1143 /* Subtract leaf header */
1144 leaf_size -= sizeof(u_int32_t);
1146 /* Update the Extended ROM leaf length */
1147 cache->ext_rom->value.leaf.len =
1148 bytes_to_quads(leaf_size);
1150 /* Zero out the unused ROM region */
1151 memset(cache->data + bytes_to_quads(cache->len), 0x00,
1152 cache->size - cache->len);
1155 /* Copy the data into the cache buffer */
1156 csr1212_fill_cache(cache);
1159 return CSR1212_SUCCESS;
/* Copy `len` bytes at ROM offset `offset` into `buffer` from whichever cache
 * fully contains the range (the memcpy destination argument line is elided
 * in this view).  Returns CSR1212_SUCCESS on a full hit, CSR1212_EINVAL if
 * the range straddles a cache boundary, CSR1212_ENOENT if no cache overlaps. */
1162 int csr1212_read(struct csr1212_csr *csr, u_int32_t offset, void *buffer, u_int32_t len)
1164 struct csr1212_csr_rom_cache *cache;
1166 for (cache = csr->cache_head; cache; cache = cache->next) {
1167 if (offset >= cache->offset &&
1168 (offset + len) <= (cache->offset + cache->size)) {
1170 &cache->data[bytes_to_quads(offset - cache->offset)],
1172 return CSR1212_SUCCESS;
/* Partial overlap (starts before or runs past the cache) is an error. */
1173 } else if (((offset < cache->offset) &&
1174 ((offset + len) >= cache->offset)) ||
1175 ((offset >= cache->offset) &&
1176 ((offset + len) > (cache->offset + cache->size)))) {
1177 return CSR1212_EINVAL;
1180 return CSR1212_ENOENT;
1185 /* Parse a chunk of data as a Config ROM */
/* Read the bus info block (and any extra CRC-covered area) from the remote
 * node one quadlet at a time, sanity-check its length field against the
 * expected bus_info_len, and record the bytes read as the head cache's first
 * filled region.  Returns a CSR1212_* status. */
1187 static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
1189 struct csr1212_bus_info_block_img *bi;
1190 struct csr1212_cache_region *cr;
1194 /* IEEE 1212 says that the entire bus info block should be readable in
1195 * a single transaction regardless of the max_rom value.
1196 * Unfortunately, many IEEE 1394 devices do not abide by that, so the
1197 * bus info block will be read 1 quadlet at a time. The rest of the
1198 * ConfigROM will be read according to the max_rom field. */
1199 for (i = 0; i < csr->bus_info_len; i += sizeof(csr1212_quad_t)) {
1200 ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
1201 sizeof(csr1212_quad_t),
1202 &csr->cache_head->data[bytes_to_quads(i)],
1204 if (ret != CSR1212_SUCCESS)
1208 bi = (struct csr1212_bus_info_block_img*)csr->cache_head->data;
1209 csr->crc_len = quads_to_bytes(bi->crc_length);
1211 /* IEEE 1212 recommends that crc_len be equal to bus_info_len, but that is not
1212 * always the case, so read the rest of the crc area 1 quadlet at a time. */
1213 for (i = csr->bus_info_len; i <= csr->crc_len; i += sizeof(csr1212_quad_t)) {
1214 ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
1215 sizeof(csr1212_quad_t),
1216 &csr->cache_head->data[bytes_to_quads(i)],
1218 if (ret != CSR1212_SUCCESS)
1222 if (bytes_to_quads(csr->bus_info_len - sizeof(csr1212_quad_t)) != bi->length)
1223 return CSR1212_EINVAL;
/* NOTE(review): the CRC checks below appear to be disabled in the full
 * source (this block reads like commented-out/guarded code) — confirm. */
1226 /* Apparently there are too many different wrong implementations of the
1227 * CRC algorithm that verifying them is moot. */
1228 if ((csr1212_crc16(bi->data, bi->crc_length) != bi->crc) &&
1229 (csr1212_msft_crc16(bi->data, bi->crc_length) != bi->crc))
1230 return CSR1212_EINVAL;
1233 cr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
1235 return CSR1212_ENOMEM;
/* The whole bus-info + CRC area is now resident in the head cache. */
1239 cr->offset_start = 0;
1240 cr->offset_end = csr->crc_len + 4;
1242 csr->cache_head->filled_head = cr;
1243 csr->cache_head->filled_tail = cr;
1245 return CSR1212_SUCCESS;
/* Decode one raw directory entry quadlet `ki` found at ROM position
 * `kv_pos` (parameter lines partially elided) into a keyval and attach it
 * to `dir`.  Immediates/CSR-offsets are materialized directly; leaf and
 * directory entries become placeholder keyvals (valid = 0) linked into the
 * cache's layout list, unless one already exists at that offset. */
1248 static inline int csr1212_parse_dir_entry(struct csr1212_keyval *dir,
1251 struct csr1212_csr_rom_cache *cache)
1253 int ret = CSR1212_SUCCESS;
1254 struct csr1212_keyval *k = NULL;
1257 switch(CSR1212_KV_KEY_TYPE(ki)) {
1258 case CSR1212_KV_TYPE_IMMEDIATE:
1259 k = csr1212_new_immediate(CSR1212_KV_KEY_ID(ki),
1260 CSR1212_KV_VAL(ki));
1262 ret = CSR1212_ENOMEM;
1266 k->refcnt = 0; /* Don't keep local reference when parsing. */
1269 case CSR1212_KV_TYPE_CSR_OFFSET:
1270 k = csr1212_new_csr_offset(CSR1212_KV_KEY_ID(ki),
1271 CSR1212_KV_VAL(ki));
1273 ret = CSR1212_ENOMEM;
1276 k->refcnt = 0; /* Don't keep local reference when parsing. */
1280 /* Compute the offset from 0xffff f000 0000. */
1281 offset = quads_to_bytes(CSR1212_KV_VAL(ki)) + kv_pos;
1282 if (offset == kv_pos) {
1283 /* Uh-oh. Can't have a relative offset of 0 for Leaves
1284 * or Directories. The Config ROM image is most likely
1285 * messed up, so we'll just abort here. */
1290 k = csr1212_find_keyval_offset(cache->layout_head, offset);
1293 break; /* Found it. */
1295 if (CSR1212_KV_KEY_TYPE(ki) == CSR1212_KV_TYPE_DIRECTORY) {
1296 k = csr1212_new_directory(CSR1212_KV_KEY_ID(ki));
1298 k = csr1212_new_leaf(CSR1212_KV_KEY_ID(ki), NULL, 0);
1301 ret = CSR1212_ENOMEM;
1304 k->refcnt = 0; /* Don't keep local reference when parsing. */
1305 k->valid = 0; /* Contents not read yet so it's not valid. */
/* Record the placeholder's position and append it to the layout list so a
 * later csr1212_parse_keyval() can fill it in. */
1308 k->prev = cache->layout_tail;
1310 if (cache->layout_tail)
1311 cache->layout_tail->next = k;
1312 cache->layout_tail = k;
1314 ret = csr1212_attach_keyval_to_directory(dir, k);
1317 if (ret != CSR1212_SUCCESS) {
/* Parse the image bytes for `kv` (already positioned at kv->offset inside
 * `cache`): verify the CRC, then either walk a directory's entries through
 * csr1212_parse_dir_entry() or capture a leaf's payload.  Extended-ROM
 * leaves alias the cache buffer directly instead of copying — which is why
 * free_keyval must never free their data.  Returns a CSR1212_* status. */
1324 int csr1212_parse_keyval(struct csr1212_keyval *kv,
1325 struct csr1212_csr_rom_cache *cache)
1327 struct csr1212_keyval_img *kvi;
1329 int ret = CSR1212_SUCCESS;
1332 kvi = (struct csr1212_keyval_img*)&cache->data[bytes_to_quads(kv->offset -
1334 kvi_len = CSR1212_BE16_TO_CPU(kvi->length);
/* NOTE(review): CRC verification appears disabled in the full source (the
 * comment below suggests a guarded-out block) — confirm. */
1337 /* Apparently there are too many different wrong implementations of the
1338 * CRC algorithm that verifying them is moot. */
1339 if ((csr1212_crc16(kvi->data, kvi_len) != kvi->crc) &&
1340 (csr1212_msft_crc16(kvi->data, kvi_len) != kvi->crc)) {
1341 ret = CSR1212_EINVAL;
1346 switch(kv->key.type) {
1347 case CSR1212_KV_TYPE_DIRECTORY:
1348 for (i = 0; i < kvi_len; i++) {
1349 csr1212_quad_t ki = kvi->data[i];
1351 /* Some devices put null entries in their unit
1352 * directories. If we come across such an entry,
1356 ret = csr1212_parse_dir_entry(kv, ki,
1358 quads_to_bytes(i + 1)),
1361 kv->value.directory.len = kvi_len;
1364 case CSR1212_KV_TYPE_LEAF:
1365 if (kv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
/* Extended-ROM payload stays in the cache buffer — no copy, no ownership. */
1366 kv->value.leaf.data = cache->data;
1368 kv->value.leaf.data = CSR1212_MALLOC(quads_to_bytes(kvi_len));
1369 if (!kv->value.leaf.data)
1371 ret = CSR1212_ENOMEM;
1375 kv->value.leaf.len = kvi_len;
1376 memcpy(kv->value.leaf.data, kvi->data, quads_to_bytes(kvi_len));
/* Fetch the raw image of @kv from the device via csr->ops->bus_read into the
 * appropriate ROM cache (creating a new cache for Extended ROM leaves), then
 * hand the filled cache to csr1212_parse_keyval().  Tracks which byte ranges
 * of the cache have been read with a linked list of csr1212_cache_region.
 * NOTE(review): some interior lines are elided in this extract. */
1388 int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
1390 struct csr1212_cache_region *cr, *ncr, *newcr = NULL;
1391 struct csr1212_keyval_img *kvi = NULL;
1392 struct csr1212_csr_rom_cache *cache;
1395 u_int32_t *cache_ptr;
1396 u_int16_t kv_len = 0;
1400 return CSR1212_EINVAL;
1402 /* First find which cache the data should be in (or go in if not read
1404 for (cache = csr->cache_head; cache; cache = cache->next) {
1405 if (kv->offset >= cache->offset &&
1406 kv->offset < (cache->offset + cache->size))
/* No existing cache covers this offset. */
1412 struct csr1212_csr_rom_cache *nc;
1414 /* Only create a new cache for Extended ROM leaves. */
1415 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
1416 return CSR1212_EINVAL;
/* Read the leaf's first quadlet to learn its length (upper 16 bits). */
1418 if (csr->ops->bus_read(csr,
1419 CSR1212_REGISTER_SPACE_BASE + kv->offset,
1420 sizeof(csr1212_quad_t), &q, csr->private)) {
1424 kv->value.leaf.len = quads_to_bytes(CSR1212_BE32_TO_CPU(q)>>16);
1426 nc = csr1212_rom_cache_malloc(kv->offset, kv->value.leaf.len);
1429 csr->cache_tail = nc;
/* Seed the filled-region list with the header quadlet just read. */
1430 cache->filled_head =
1431 CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
1432 if (!cache->filled_head) {
1433 return CSR1212_ENOMEM;
1436 cache->filled_head->offset_start = 0;
1437 cache->filled_head->offset_end = sizeof(csr1212_quad_t);
1438 cache->filled_tail = cache->filled_head;
1439 cache->filled_head->next = NULL;
1440 cache->filled_head->prev = NULL;
/* Byte offset of the keyval within this cache's buffer. */
1444 cache_index = kv->offset - cache->offset;
1446 /* Now search read portions of the cache to see if it is there. */
1447 for (cr = cache->filled_head; cr; cr = cr->next) {
1448 if (cache_index < cr->offset_start) {
/* Target lies in a gap before this region: start a new region,
 * aligned down to the max_rom read granularity. */
1449 newcr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
1451 return CSR1212_ENOMEM;
1453 newcr->offset_start = cache_index & ~(csr->max_rom - 1);
1454 newcr->offset_end = newcr->offset_start;
1456 newcr->prev = cr->prev;
1460 } else if ((cache_index >= cr->offset_start) &&
1461 (cache_index < cr->offset_end)) {
/* Already read: the image header is in the cache now. */
1462 kvi = (struct csr1212_keyval_img*)
1463 (&cache->data[bytes_to_quads(cache_index)]);
1464 kv_len = quads_to_bytes(CSR1212_BE16_TO_CPU(kvi->length) +
1467 } else if (cache_index == cr->offset_end)
/* Target abuts this region's end: extend it in place. */
/* Fell off the end of the region list: append a new region after the tail. */
1472 cr = cache->filled_tail;
1473 newcr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
1475 return CSR1212_ENOMEM;
1477 newcr->offset_start = cache_index & ~(csr->max_rom - 1);
1478 newcr->offset_end = newcr->offset_start;
1480 newcr->next = cr->next;
1483 cache->filled_tail = newcr;
/* Read max_rom-sized chunks until the whole keyval image (header plus
 * kv_len payload bytes) is inside the current region. */
1486 while(!kvi || cr->offset_end < cache_index + kv_len) {
1487 cache_ptr = &cache->data[bytes_to_quads(cr->offset_end &
1488 ~(csr->max_rom - 1))];
1490 addr = (CSR1212_CSR_ARCH_REG_SPACE_BASE + cache->offset +
1491 cr->offset_end) & ~(csr->max_rom - 1);
1493 if (csr->ops->bus_read(csr, addr, csr->max_rom, cache_ptr,
1495 if (csr->max_rom == 4)
1496 /* We've got problems! */
1499 /* Apparently the max_rom value was a lie, set it to
1500 * do quadlet reads and try again. */
/* Round offset_end up to the next max_rom boundary. */
1505 cr->offset_end += csr->max_rom - (cr->offset_end &
1506 (csr->max_rom - 1));
/* Once the header quadlet is in, decode the image length. */
1508 if (!kvi && (cr->offset_end > cache_index)) {
1509 kvi = (struct csr1212_keyval_img*)
1510 (&cache->data[bytes_to_quads(cache_index)]);
1511 kv_len = quads_to_bytes(CSR1212_BE16_TO_CPU(kvi->length) +
1515 if ((kv_len + (kv->offset - cache->offset)) > cache->size) {
1516 /* The Leaf or Directory claims its length extends
1517 * beyond the ConfigROM image region and thus beyond the
1518 * end of our cache region. Therefore, we abort now
1519 * rather than seg faulting later. */
/* Merge with the next region if this read reached/overlapped it. */
1525 if (ncr && (cr->offset_end >= ncr->offset_start)) {
1526 /* consolidate region entries */
1527 ncr->offset_start = cr->offset_start;
1530 cr->prev->next = cr->next;
1531 ncr->prev = cr->prev;
1532 if (cache->filled_head == cr)
1533 cache->filled_head = ncr;
1539 return csr1212_parse_keyval(kv, cache);
/* Top-level entry point: parse a device's Config ROM.  Reads the bus info
 * block, determines the max read size the device supports, then kicks off
 * parsing from the root directory keyval.
 * NOTE(review): the function's closing lines fall outside this extract. */
1544 int csr1212_parse_csr(struct csr1212_csr *csr)
/* Maps the 2-bit max_rom field to a read size in bytes; 0 = invalid. */
1546 static const int mr_map[] = { 4, 64, 1024, 0 };
1549 if (!csr || !csr->ops->bus_read)
1550 return CSR1212_EINVAL;
1552 ret = csr1212_parse_bus_info_block(csr);
1553 if (ret != CSR1212_SUCCESS)
/* Without a get_max_rom op, fall back to quadlet (4-byte) reads. */
1556 if (!csr->ops->get_max_rom)
1557 csr->max_rom = mr_map[0]; /* default value */
1559 csr->max_rom = mr_map[csr->ops->get_max_rom(csr->bus_info_data,
/* The root directory starts the layout list of the first cache. */
1562 csr->cache_head->layout_head = csr->root_kv;
1563 csr->cache_head->layout_tail = csr->root_kv;
/* Root directory sits immediately after the bus info block in
 * Config ROM space (offset within the 1394 register space). */
1565 csr->root_kv->offset = (CSR1212_CONFIG_ROM_SPACE_BASE & 0xffff) +
1568 csr->root_kv->valid = 0;
1569 csr1212_get_keyval(csr, csr->root_kv);
1571 return CSR1212_SUCCESS;