2 * csr1212.c -- IEEE 1212 Control and Status Register support for Linux
4 * Copyright (C) 2003 Francois Retief <fgretief@sun.ac.za>
5 * Steve Kinneberg <kinnebergsteve@acmsystems.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
21 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
26 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
27 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * - Verify interface consistency: i.e., public functions that take a size
33 * parameter expect size to be in bytes.
34 * - Convenience functions for reading a block of data from a given offset.
44 /* Permitted key type for each key id */
45 #define __I (1 << CSR1212_KV_TYPE_IMMEDIATE)
46 #define __C (1 << CSR1212_KV_TYPE_CSR_OFFSET)
47 #define __D (1 << CSR1212_KV_TYPE_DIRECTORY)
48 #define __L (1 << CSR1212_KV_TYPE_LEAF)
49 static const u_int8_t csr1212_key_id_type_map[0x30] = {
51 __D | __L, /* Descriptor */
52 __I | __D | __L, /* Bus_Dependent_Info */
53 __I | __D | __L, /* Vendor */
54 __I, /* Hardware_Version */
56 __D | __L, /* Module */
57 0, 0, 0, 0, /* Reserved */
58 __I, /* Node_Capabilities */
60 0, 0, 0, /* Reserved */
62 __I, /* Specifier_ID */
64 __I | __C | __D | __L, /* Dependent_Info */
65 __L, /* Unit_Location */
71 __L, /* Extended_ROM */
72 __I, /* Extended_Key_Specifier_ID */
73 __I, /* Extended_Key */
74 __I | __C | __D | __L, /* Extended_Data */
75 __L, /* Modifiable_Descriptor */
76 __I, /* Directory_ID */
/* Convert a quadlet (32-bit word) count to a byte count. */
#define quads_to_bytes(_q) ((_q) * sizeof(u_int32_t))
/* Convert a byte count to the number of quadlets needed to hold it,
 * rounding up to the next whole quadlet. */
#define bytes_to_quads(_b) (((_b) + sizeof(u_int32_t) - 1) / sizeof(u_int32_t))
88 static inline void free_keyval(struct csr1212_keyval *kv)
90 if (kv->key.type == CSR1212_KV_TYPE_LEAF)
91 CSR1212_FREE(kv->value.leaf.data);
/* Compute the IEEE 1212 16-bit CRC over `length` big-endian quadlets.
 * The result is returned in big-endian byte order, ready to be stored
 * in a ROM image. */
static u_int16_t csr1212_crc16(const u_int32_t *buffer, size_t length)
{
	int shift;
	u_int32_t data;
	u_int16_t sum, crc = 0;

	for (; length; length--) {
		data = CSR1212_BE32_TO_CPU(*buffer);
		buffer++;
		/* Fold the quadlet into the CRC one nibble at a time. */
		for (shift = 28; shift >= 0; shift -= 4) {
			sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
			crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
		}
		crc &= 0xffff;
	}

	return CSR1212_CPU_TO_BE16(crc);
}
/* Microsoft computes the CRC with the bytes in reverse order.  Therefore we
 * have a special version of the CRC algorithm to account for their buggy
 * implementation: identical to csr1212_crc16() except each quadlet is read
 * in little-endian byte order. */
static u_int16_t csr1212_msft_crc16(const u_int32_t *buffer, size_t length)
{
	int shift;
	u_int32_t data;
	u_int16_t sum, crc = 0;

	for (; length; length--) {
		data = CSR1212_LE32_TO_CPU(*buffer);
		buffer++;
		for (shift = 28; shift >= 0; shift -= 4) {
			sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
			crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
		}
		crc &= 0xffff;
	}

	return CSR1212_CPU_TO_BE16(crc);
}
139 static inline struct csr1212_dentry *csr1212_find_keyval(struct csr1212_keyval *dir,
140 struct csr1212_keyval *kv)
142 struct csr1212_dentry *pos;
144 for (pos = dir->value.directory.dentries_head;
145 pos != NULL; pos = pos->next) {
153 static inline struct csr1212_keyval *csr1212_find_keyval_offset(struct csr1212_keyval *kv_list,
156 struct csr1212_keyval *kv;
158 for (kv = kv_list; kv != NULL; kv = kv->next) {
159 if (kv->offset == offset)
166 /* Creation Routines */
167 struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
168 size_t bus_info_size, void *private)
170 struct csr1212_csr *csr;
172 csr = CSR1212_MALLOC(sizeof(*csr));
177 csr1212_rom_cache_malloc(CSR1212_CONFIG_ROM_SPACE_OFFSET,
178 CSR1212_CONFIG_ROM_SPACE_SIZE);
179 if (!csr->cache_head) {
184 /* The keyval key id is not used for the root node, but a valid key id
185 * that can be used for a directory needs to be passed to
186 * csr1212_new_directory(). */
187 csr->root_kv = csr1212_new_directory(CSR1212_KV_ID_VENDOR);
189 CSR1212_FREE(csr->cache_head);
194 csr->bus_info_data = csr->cache_head->data;
195 csr->bus_info_len = bus_info_size;
196 csr->crc_len = bus_info_size;
198 csr->private = private;
199 csr->cache_tail = csr->cache_head;
206 void csr1212_init_local_csr(struct csr1212_csr *csr,
207 const u_int32_t *bus_info_data, int max_rom)
209 csr->max_rom = max_rom;
210 memcpy(csr->bus_info_data, bus_info_data, csr->bus_info_len);
214 static struct csr1212_keyval *csr1212_new_keyval(u_int8_t type, u_int8_t key)
216 struct csr1212_keyval *kv;
218 if (key < 0x30 && ((csr1212_key_id_type_map[key] & (1 << type)) == 0))
221 kv = CSR1212_MALLOC(sizeof(*kv));
228 kv->associate = NULL;
238 struct csr1212_keyval *csr1212_new_immediate(u_int8_t key, u_int32_t value)
240 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_IMMEDIATE, key);
245 kv->value.immediate = value;
250 struct csr1212_keyval *csr1212_new_leaf(u_int8_t key, const void *data, size_t data_len)
252 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, key);
258 kv->value.leaf.data = CSR1212_MALLOC(data_len);
259 if (!kv->value.leaf.data) {
265 memcpy(kv->value.leaf.data, data, data_len);
267 kv->value.leaf.data = NULL;
270 kv->value.leaf.len = bytes_to_quads(data_len);
277 struct csr1212_keyval *csr1212_new_csr_offset(u_int8_t key, u_int32_t csr_offset)
279 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_CSR_OFFSET, key);
284 kv->value.csr_offset = csr_offset;
291 struct csr1212_keyval *csr1212_new_directory(u_int8_t key)
293 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_DIRECTORY, key);
298 kv->value.directory.len = 0;
300 kv->value.directory.dentries_head = NULL;
301 kv->value.directory.dentries_tail = NULL;
306 int csr1212_associate_keyval(struct csr1212_keyval *kv,
307 struct csr1212_keyval *associate)
309 if (!kv || !associate)
310 return CSR1212_EINVAL;
312 if (kv->key.id == CSR1212_KV_ID_DESCRIPTOR ||
313 (associate->key.id != CSR1212_KV_ID_DESCRIPTOR &&
314 associate->key.id != CSR1212_KV_ID_DEPENDENT_INFO &&
315 associate->key.id != CSR1212_KV_ID_EXTENDED_KEY &&
316 associate->key.id != CSR1212_KV_ID_EXTENDED_DATA &&
317 associate->key.id < 0x30))
318 return CSR1212_EINVAL;
320 if (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID &&
321 associate->key.id != CSR1212_KV_ID_EXTENDED_KEY)
322 return CSR1212_EINVAL;
324 if (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
325 associate->key.id != CSR1212_KV_ID_EXTENDED_DATA)
326 return CSR1212_EINVAL;
328 if (associate->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
329 kv->key.id != CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID)
330 return CSR1212_EINVAL;
332 if (associate->key.id == CSR1212_KV_ID_EXTENDED_DATA &&
333 kv->key.id != CSR1212_KV_ID_EXTENDED_KEY)
334 return CSR1212_EINVAL;
337 csr1212_release_keyval(kv->associate);
340 kv->associate = associate;
342 return CSR1212_SUCCESS;
345 int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
346 struct csr1212_keyval *kv)
348 struct csr1212_dentry *dentry;
350 if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY)
351 return CSR1212_EINVAL;
353 dentry = CSR1212_MALLOC(sizeof(*dentry));
355 return CSR1212_ENOMEM;
362 dentry->prev = dir->value.directory.dentries_tail;
364 if (!dir->value.directory.dentries_head)
365 dir->value.directory.dentries_head = dentry;
367 if (dir->value.directory.dentries_tail)
368 dir->value.directory.dentries_tail->next = dentry;
369 dir->value.directory.dentries_tail = dentry;
371 return CSR1212_SUCCESS;
374 struct csr1212_keyval *csr1212_new_extended_immediate(u_int32_t spec, u_int32_t key,
377 struct csr1212_keyval *kvs, *kvk, *kvv;
379 kvs = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID, spec);
380 kvk = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY, key);
381 kvv = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_DATA, value);
383 if (!kvs || !kvk || !kvv) {
393 /* Don't keep a local reference to the extended key or value. */
397 csr1212_associate_keyval(kvk, kvv);
398 csr1212_associate_keyval(kvs, kvk);
403 struct csr1212_keyval *csr1212_new_extended_leaf(u_int32_t spec, u_int32_t key,
404 const void *data, size_t data_len)
406 struct csr1212_keyval *kvs, *kvk, *kvv;
408 kvs = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID, spec);
409 kvk = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY, key);
410 kvv = csr1212_new_leaf(CSR1212_KV_ID_EXTENDED_DATA, data, data_len);
412 if (!kvs || !kvk || !kvv) {
422 /* Don't keep a local reference to the extended key or value. */
426 csr1212_associate_keyval(kvk, kvv);
427 csr1212_associate_keyval(kvs, kvk);
432 struct csr1212_keyval *csr1212_new_descriptor_leaf(u_int8_t dtype, u_int32_t specifier_id,
433 const void *data, size_t data_len)
435 struct csr1212_keyval *kv;
437 kv = csr1212_new_leaf(CSR1212_KV_ID_DESCRIPTOR, NULL,
438 data_len + CSR1212_DESCRIPTOR_LEAF_OVERHEAD);
442 CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype);
443 CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id);
446 memcpy(CSR1212_DESCRIPTOR_LEAF_DATA(kv), data, data_len);
453 struct csr1212_keyval *csr1212_new_textual_descriptor_leaf(u_int8_t cwidth,
459 struct csr1212_keyval *kv;
462 kv = csr1212_new_descriptor_leaf(0, 0, NULL, data_len +
463 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD);
467 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_WIDTH(kv, cwidth);
468 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_CHAR_SET(kv, cset);
469 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language);
471 lstr = (char*)CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv);
473 /* make sure last quadlet is zeroed out */
474 *((u_int32_t*)&(lstr[(data_len - 1) & ~0x3])) = 0;
476 /* don't copy the NUL terminator */
477 memcpy(lstr, data, data_len);
482 static int csr1212_check_minimal_ascii(const char *s)
484 static const char minimal_ascii_table[] = {
485 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
486 0x00, 0x00, 0x0a, 0x00, 0x0C, 0x0D, 0x00, 0x00,
487 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
488 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
489 0x20, 0x21, 0x22, 0x00, 0x00, 0x25, 0x26, 0x27,
490 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
491 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
492 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
493 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
494 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
495 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
496 0x58, 0x59, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x5f,
497 0x00, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
498 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
499 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
500 0x78, 0x79, 0x7a, 0x00, 0x00, 0x00, 0x00, 0x00,
503 if (minimal_ascii_table[*s & 0x7F] != *s)
504 return -1; /* failed */
506 /* String conforms to minimal-ascii, as specified by IEEE 1212,
/* Create a minimal-ASCII textual descriptor leaf from NUL-terminated
 * string `s`.  Returns NULL if the string contains characters outside
 * the minimal-ASCII set. */
struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s)
{
	/* Check if string conform to minimal_ascii format */
	if (csr1212_check_minimal_ascii(s))
		return NULL;

	/* IEEE 1212, par. 7.5.4.1 Textual descriptors (minimal ASCII) */
	return csr1212_new_textual_descriptor_leaf(0, 0, 0, s, strlen(s));
}
521 struct csr1212_keyval *csr1212_new_icon_descriptor_leaf(u_int32_t version,
522 u_int8_t palette_depth,
523 u_int8_t color_space,
530 static const int pd[4] = { 0, 4, 16, 256 };
531 static const int cs[16] = { 4, 2 };
532 struct csr1212_keyval *kv;
533 int palette_size = pd[palette_depth] * cs[color_space];
534 int pixel_size = (hscan * vscan + 3) & ~0x3;
536 if ((palette_depth && !palette) || !pixels)
539 kv = csr1212_new_descriptor_leaf(1, 0, NULL,
540 palette_size + pixel_size +
541 CSR1212_ICON_DESCRIPTOR_LEAF_OVERHEAD);
545 CSR1212_ICON_DESCRIPTOR_LEAF_SET_VERSION(kv, version);
546 CSR1212_ICON_DESCRIPTOR_LEAF_SET_PALETTE_DEPTH(kv, palette_depth);
547 CSR1212_ICON_DESCRIPTOR_LEAF_SET_COLOR_SPACE(kv, color_space);
548 CSR1212_ICON_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language);
549 CSR1212_ICON_DESCRIPTOR_LEAF_SET_HSCAN(kv, hscan);
550 CSR1212_ICON_DESCRIPTOR_LEAF_SET_VSCAN(kv, vscan);
553 memcpy(CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE(kv), palette,
556 memcpy(CSR1212_ICON_DESCRIPTOR_LEAF_PIXELS(kv), pixels, pixel_size);
561 struct csr1212_keyval *csr1212_new_modifiable_descriptor_leaf(u_int16_t max_size,
564 struct csr1212_keyval *kv;
566 /* IEEE 1212, par. 7.5.4.3 Modifiable descriptors */
567 kv = csr1212_new_leaf(CSR1212_KV_ID_MODIFIABLE_DESCRIPTOR, NULL, sizeof(u_int64_t));
571 CSR1212_MODIFIABLE_DESCRIPTOR_SET_MAX_SIZE(kv, max_size);
572 CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_HI(kv, address);
573 CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_LO(kv, address);
578 static int csr1212_check_keyword(const char *s)
582 if (('A' <= *s) && (*s <= 'Z'))
584 if (('0' <= *s) && (*s <= '9'))
589 return -1; /* failed */
591 /* String conforms to keyword, as specified by IEEE 1212,
593 return CSR1212_SUCCESS;
596 struct csr1212_keyval *csr1212_new_keyword_leaf(int strc, const char *strv[])
598 struct csr1212_keyval *kv;
602 /* Check all keywords to see if they conform to restrictions:
603 * Only the following characters is allowed ['A'..'Z','0'..'9','-']
604 * Each word is zero-terminated.
605 * Also calculate the total length of the keywords.
607 for (i = 0; i < strc; i++) {
608 if (!strv[i] || csr1212_check_keyword(strv[i])) {
611 data_len += strlen(strv[i]) + 1; /* Add zero-termination char. */
614 /* IEEE 1212, par. 7.6.5 Keyword leaves */
615 kv = csr1212_new_leaf(CSR1212_KV_ID_KEYWORD, NULL, data_len);
619 buffer = (char *)kv->value.leaf.data;
621 /* make sure last quadlet is zeroed out */
622 *((u_int32_t*)&(buffer[(data_len - 1) & ~0x3])) = 0;
624 /* Copy keyword(s) into leaf data buffer */
625 for (i = 0; i < strc; i++) {
626 int len = strlen(strv[i]) + 1;
627 memcpy(buffer, strv[i], len);
634 /* Destruction Routines */
636 void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
637 struct csr1212_keyval *kv)
639 struct csr1212_dentry *dentry;
641 if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY)
644 dentry = csr1212_find_keyval(dir, kv);
650 dentry->prev->next = dentry->next;
652 dentry->next->prev = dentry->prev;
653 if (dir->value.directory.dentries_head == dentry)
654 dir->value.directory.dentries_head = dentry->next;
655 if (dir->value.directory.dentries_tail == dentry)
656 dir->value.directory.dentries_tail = dentry->prev;
658 CSR1212_FREE(dentry);
660 csr1212_release_keyval(kv);
664 void csr1212_disassociate_keyval(struct csr1212_keyval *kv)
667 csr1212_release_keyval(kv->associate);
670 kv->associate = NULL;
674 /* This function is used to free the memory taken by a keyval. If the given
675 * keyval is a directory type, then any keyvals contained in that directory
676 * will be destroyed as well if their respective refcnts are 0. By means of
677 * list manipulation, this routine will descend a directory structure in a
678 * non-recursive manner. */
679 void _csr1212_destroy_keyval(struct csr1212_keyval *kv)
681 struct csr1212_keyval *k, *a;
682 struct csr1212_dentry dentry;
683 struct csr1212_dentry *head, *tail;
703 if (k->key.type == CSR1212_KV_TYPE_DIRECTORY) {
704 /* If the current entry is a directory, then move all
705 * the entries to the destruction list. */
706 if (k->value.directory.dentries_head) {
707 tail->next = k->value.directory.dentries_head;
708 k->value.directory.dentries_head->prev = tail;
709 tail = k->value.directory.dentries_tail;
718 if (head->prev && head->prev != &dentry) {
719 CSR1212_FREE(head->prev);
722 } else if (tail != &dentry)
728 void csr1212_destroy_csr(struct csr1212_csr *csr)
730 struct csr1212_csr_rom_cache *c, *oc;
731 struct csr1212_cache_region *cr, *ocr;
733 csr1212_release_keyval(csr->root_kv);
753 /* CSR Image Creation */
755 static int csr1212_append_new_cache(struct csr1212_csr *csr, size_t romsize)
757 struct csr1212_csr_rom_cache *cache;
760 if (!csr || !csr->ops->allocate_addr_range ||
761 !csr->ops->release_addr)
762 return CSR1212_ENOMEM;
764 /* ROM size must be a multiple of csr->max_rom */
765 romsize = (romsize + (csr->max_rom - 1)) & ~(csr->max_rom - 1);
767 csr_addr = csr->ops->allocate_addr_range(romsize, csr->max_rom, csr->private);
768 if (csr_addr == ~0ULL) {
769 return CSR1212_ENOMEM;
771 if (csr_addr < CSR1212_REGISTER_SPACE_BASE) {
772 /* Invalid address returned from allocate_addr_range(). */
773 csr->ops->release_addr(csr_addr, csr->private);
774 return CSR1212_ENOMEM;
777 cache = csr1212_rom_cache_malloc(csr_addr - CSR1212_REGISTER_SPACE_BASE, romsize);
779 csr->ops->release_addr(csr_addr, csr->private);
780 return CSR1212_ENOMEM;
783 cache->ext_rom = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, CSR1212_KV_ID_EXTENDED_ROM);
784 if (!cache->ext_rom) {
785 csr->ops->release_addr(csr_addr, csr->private);
787 return CSR1212_ENOMEM;
790 if (csr1212_attach_keyval_to_directory(csr->root_kv, cache->ext_rom) != CSR1212_SUCCESS) {
791 csr1212_release_keyval(cache->ext_rom);
792 csr->ops->release_addr(csr_addr, csr->private);
794 return CSR1212_ENOMEM;
796 cache->ext_rom->offset = csr_addr - CSR1212_REGISTER_SPACE_BASE;
797 cache->ext_rom->value.leaf.len = 0;
799 /* Add cache to tail of cache list */
800 cache->prev = csr->cache_tail;
801 csr->cache_tail->next = cache;
802 csr->cache_tail = cache;
803 return CSR1212_SUCCESS;
806 static inline void csr1212_remove_cache(struct csr1212_csr *csr,
807 struct csr1212_csr_rom_cache *cache)
809 if (csr->cache_head == cache)
810 csr->cache_head = cache->next;
811 if (csr->cache_tail == cache)
812 csr->cache_tail = cache->prev;
815 cache->prev->next = cache->next;
817 cache->next->prev = cache->prev;
819 if (cache->ext_rom) {
820 csr1212_detach_keyval_from_directory(csr->root_kv, cache->ext_rom);
821 csr1212_release_keyval(cache->ext_rom);
827 static int csr1212_generate_layout_subdir(struct csr1212_keyval *dir,
828 struct csr1212_keyval **layout_tail)
830 struct csr1212_dentry *dentry;
831 struct csr1212_keyval *dkv;
832 struct csr1212_keyval *last_extkey_spec = NULL;
833 struct csr1212_keyval *last_extkey = NULL;
836 for (dentry = dir->value.directory.dentries_head; dentry;
837 dentry = dentry->next) {
838 for (dkv = dentry->kv; dkv; dkv = dkv->associate) {
839 /* Special Case: Extended Key Specifier_ID */
840 if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
841 if (last_extkey_spec == NULL) {
842 last_extkey_spec = dkv;
843 } else if (dkv->value.immediate != last_extkey_spec->value.immediate) {
844 last_extkey_spec = dkv;
848 /* Special Case: Extended Key */
849 } else if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
850 if (last_extkey == NULL) {
852 } else if (dkv->value.immediate != last_extkey->value.immediate) {
861 switch(dkv->key.type) {
863 case CSR1212_KV_TYPE_IMMEDIATE:
864 case CSR1212_KV_TYPE_CSR_OFFSET:
866 case CSR1212_KV_TYPE_LEAF:
867 case CSR1212_KV_TYPE_DIRECTORY:
868 /* Remove from list */
870 dkv->prev->next = dkv->next;
872 dkv->next->prev = dkv->prev;
873 if (dkv == *layout_tail)
874 *layout_tail = dkv->prev;
876 /* Special case: Extended ROM leafs */
877 if (dkv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
878 dkv->value.leaf.len = 0; /* initialize to zero */
879 /* Don't add Extended ROM leafs in the layout list,
880 * they are handled differently. */
884 /* Add to tail of list */
886 dkv->prev = *layout_tail;
887 (*layout_tail)->next = dkv;
896 size_t csr1212_generate_layout_order(struct csr1212_keyval *kv)
898 struct csr1212_keyval *ltail = kv;
902 switch(kv->key.type) {
903 case CSR1212_KV_TYPE_LEAF:
904 /* Add 1 quadlet for crc/len field */
905 agg_size += kv->value.leaf.len + 1;
908 case CSR1212_KV_TYPE_DIRECTORY:
909 kv->value.directory.len = csr1212_generate_layout_subdir(kv, <ail);
910 /* Add 1 quadlet for crc/len field */
911 agg_size += kv->value.directory.len + 1;
916 return quads_to_bytes(agg_size);
919 struct csr1212_keyval *csr1212_generate_positions(struct csr1212_csr_rom_cache *cache,
920 struct csr1212_keyval *start_kv,
923 struct csr1212_keyval *kv = start_kv;
924 struct csr1212_keyval *okv = start_kv;
926 int kv_len = 0, okv_len = 0;
928 cache->layout_head = kv;
930 while(kv && pos < cache->size) {
931 kv->offset = cache->offset + pos;
933 switch(kv->key.type) {
934 case CSR1212_KV_TYPE_LEAF:
935 kv_len = kv->value.leaf.len;
938 case CSR1212_KV_TYPE_DIRECTORY:
939 kv_len = kv->value.directory.len;
943 /* Should never get here */
947 pos += quads_to_bytes(kv_len + 1);
949 if (pos <= cache->size) {
956 cache->layout_tail = okv;
957 cache->len = (okv->offset - cache->offset) + quads_to_bytes(okv_len + 1);
962 static void csr1212_generate_tree_subdir(struct csr1212_keyval *dir,
963 u_int32_t *data_buffer)
965 struct csr1212_dentry *dentry;
966 struct csr1212_keyval *last_extkey_spec = NULL;
967 struct csr1212_keyval *last_extkey = NULL;
970 for (dentry = dir->value.directory.dentries_head; dentry; dentry = dentry->next) {
971 struct csr1212_keyval *a;
973 for (a = dentry->kv; a; a = a->associate) {
976 /* Special Case: Extended Key Specifier_ID */
977 if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
978 if (last_extkey_spec == NULL) {
979 last_extkey_spec = a;
980 } else if (a->value.immediate != last_extkey_spec->value.immediate) {
981 last_extkey_spec = a;
985 /* Special Case: Extended Key */
986 } else if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
987 if (last_extkey == NULL) {
989 } else if (a->value.immediate != last_extkey->value.immediate) {
996 switch(a->key.type) {
997 case CSR1212_KV_TYPE_IMMEDIATE:
998 value = a->value.immediate;
1000 case CSR1212_KV_TYPE_CSR_OFFSET:
1001 value = a->value.csr_offset;
1003 case CSR1212_KV_TYPE_LEAF:
1005 value -= dir->offset + quads_to_bytes(1+index);
1006 value = bytes_to_quads(value);
1008 case CSR1212_KV_TYPE_DIRECTORY:
1010 value -= dir->offset + quads_to_bytes(1+index);
1011 value = bytes_to_quads(value);
1014 /* Should never get here */
1015 break; /* GDB breakpoint */
1018 value |= (a->key.id & CSR1212_KV_KEY_ID_MASK) << CSR1212_KV_KEY_SHIFT;
1019 value |= (a->key.type & CSR1212_KV_KEY_TYPE_MASK) <<
1020 (CSR1212_KV_KEY_SHIFT + CSR1212_KV_KEY_TYPE_SHIFT);
1021 data_buffer[index] = CSR1212_CPU_TO_BE32(value);
1027 void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache)
1029 struct csr1212_keyval *kv, *nkv;
1030 struct csr1212_keyval_img *kvi;
1032 for (kv = cache->layout_head; kv != cache->layout_tail->next; kv = nkv) {
1033 kvi = (struct csr1212_keyval_img *)
1034 (cache->data + bytes_to_quads(kv->offset - cache->offset));
1035 switch(kv->key.type) {
1037 case CSR1212_KV_TYPE_IMMEDIATE:
1038 case CSR1212_KV_TYPE_CSR_OFFSET:
1039 /* Should never get here */
1040 break; /* GDB breakpoint */
1042 case CSR1212_KV_TYPE_LEAF:
1043 /* Don't copy over Extended ROM areas, they are
1044 * already filled out! */
1045 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
1046 memcpy(kvi->data, kv->value.leaf.data,
1047 quads_to_bytes(kv->value.leaf.len));
1049 kvi->length = CSR1212_CPU_TO_BE16(kv->value.leaf.len);
1050 kvi->crc = csr1212_crc16(kvi->data, kv->value.leaf.len);
1053 case CSR1212_KV_TYPE_DIRECTORY:
1054 csr1212_generate_tree_subdir(kv, kvi->data);
1056 kvi->length = CSR1212_CPU_TO_BE16(kv->value.directory.len);
1057 kvi->crc = csr1212_crc16(kvi->data, kv->value.directory.len);
1067 int csr1212_generate_csr_image(struct csr1212_csr *csr)
1069 struct csr1212_bus_info_block_img *bi;
1070 struct csr1212_csr_rom_cache *cache;
1071 struct csr1212_keyval *kv;
1077 return CSR1212_EINVAL;
1079 cache = csr->cache_head;
1081 bi = (struct csr1212_bus_info_block_img*)cache->data;
1083 bi->length = bytes_to_quads(csr->bus_info_len) - 1;
1084 bi->crc_length = bi->length;
1085 bi->crc = csr1212_crc16(bi->data, bi->crc_length);
1087 agg_size = csr1212_generate_layout_order(csr->root_kv);
1089 init_offset = csr->bus_info_len;
1091 for (kv = csr->root_kv, cache = csr->cache_head; kv; cache = cache->next) {
1093 /* Estimate approximate number of additional cache
1094 * regions needed (it assumes that the cache holding
1095 * the first 1K Config ROM space always exists). */
1096 int est_c = agg_size / (CSR1212_EXTENDED_ROM_SIZE -
1097 (2 * sizeof(u_int32_t))) + 1;
1099 /* Add additional cache regions, extras will be
1101 for (; est_c; est_c--) {
1102 ret = csr1212_append_new_cache(csr, CSR1212_EXTENDED_ROM_SIZE);
1103 if (ret != CSR1212_SUCCESS)
1106 /* Need to re-layout for additional cache regions */
1107 agg_size = csr1212_generate_layout_order(csr->root_kv);
1109 cache = csr->cache_head;
1110 init_offset = csr->bus_info_len;
1112 kv = csr1212_generate_positions(cache, kv, init_offset);
1113 agg_size -= cache->len;
1114 init_offset = sizeof(u_int32_t);
1117 /* Remove unused, excess cache regions */
1119 struct csr1212_csr_rom_cache *oc = cache;
1121 cache = cache->next;
1122 csr1212_remove_cache(csr, oc);
1125 /* Go through the list backward so that when done, the correct CRC
1126 * will be calculated for the Extended ROM areas. */
1127 for(cache = csr->cache_tail; cache; cache = cache->prev) {
1128 /* Only Extended ROM caches should have this set. */
1129 if (cache->ext_rom) {
1132 /* Make sure the Extended ROM leaf is a multiple of
1133 * max_rom in size. */
1134 leaf_size = (cache->len + (csr->max_rom - 1)) &
1137 /* Zero out the unused ROM region */
1138 memset(cache->data + bytes_to_quads(cache->len), 0x00,
1139 leaf_size - cache->len);
1141 /* Subtract leaf header */
1142 leaf_size -= sizeof(u_int32_t);
1144 /* Update the Extended ROM leaf length */
1145 cache->ext_rom->value.leaf.len =
1146 bytes_to_quads(leaf_size);
1148 /* Zero out the unused ROM region */
1149 memset(cache->data + bytes_to_quads(cache->len), 0x00,
1150 cache->size - cache->len);
1153 /* Copy the data into the cache buffer */
1154 csr1212_fill_cache(cache);
1157 return CSR1212_SUCCESS;
1160 int csr1212_read(struct csr1212_csr *csr, u_int32_t offset, void *buffer, u_int32_t len)
1162 struct csr1212_csr_rom_cache *cache;
1164 for (cache = csr->cache_head; cache; cache = cache->next) {
1165 if (offset >= cache->offset &&
1166 (offset + len) <= (cache->offset + cache->size)) {
1168 &cache->data[bytes_to_quads(offset - cache->offset)],
1170 return CSR1212_SUCCESS;
1171 } else if (((offset < cache->offset) &&
1172 ((offset + len) >= cache->offset)) ||
1173 ((offset >= cache->offset) &&
1174 ((offset + len) > (cache->offset + cache->size)))) {
1175 return CSR1212_EINVAL;
1178 return CSR1212_ENOENT;
1183 /* Parse a chunk of data as a Config ROM */
1185 static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
1187 struct csr1212_bus_info_block_img *bi;
1188 struct csr1212_cache_region *cr;
1192 /* IEEE 1212 says that the entire bus info block should be readable in
1193 * a single transaction regardless of the max_rom value.
1194 * Unfortunately, many IEEE 1394 devices do not abide by that, so the
1195 * bus info block will be read 1 quadlet at a time. The rest of the
1196 * ConfigROM will be read according to the max_rom field. */
1197 for (i = 0; i < csr->bus_info_len; i += sizeof(csr1212_quad_t)) {
1198 ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
1199 sizeof(csr1212_quad_t),
1200 &csr->cache_head->data[bytes_to_quads(i)],
1202 if (ret != CSR1212_SUCCESS)
1206 bi = (struct csr1212_bus_info_block_img*)csr->cache_head->data;
1207 csr->crc_len = quads_to_bytes(bi->crc_length);
1209 /* IEEE 1212 recommends that crc_len be equal to bus_info_len, but that is not
1210 * always the case, so read the rest of the crc area 1 quadlet at a time. */
1211 for (i = csr->bus_info_len; i <= csr->crc_len; i += sizeof(csr1212_quad_t)) {
1212 ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
1213 sizeof(csr1212_quad_t),
1214 &csr->cache_head->data[bytes_to_quads(i)],
1216 if (ret != CSR1212_SUCCESS)
1220 if (bytes_to_quads(csr->bus_info_len - sizeof(csr1212_quad_t)) != bi->length)
1221 return CSR1212_EINVAL;
1224 /* Apparently there are too many differnt wrong implementations of the
1225 * CRC algorithm that verifying them is moot. */
1226 if ((csr1212_crc16(bi->data, bi->crc_length) != bi->crc) &&
1227 (csr1212_msft_crc16(bi->data, bi->crc_length) != bi->crc))
1228 return CSR1212_EINVAL;
1231 cr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
1233 return CSR1212_ENOMEM;
1237 cr->offset_start = 0;
1238 cr->offset_end = csr->crc_len + 4;
1240 csr->cache_head->filled_head = cr;
1241 csr->cache_head->filled_tail = cr;
1243 return CSR1212_SUCCESS;
1246 static inline int csr1212_parse_dir_entry(struct csr1212_keyval *dir,
1249 struct csr1212_csr_rom_cache *cache)
1251 int ret = CSR1212_SUCCESS;
1252 struct csr1212_keyval *k = NULL;
1255 switch(CSR1212_KV_KEY_TYPE(ki)) {
1256 case CSR1212_KV_TYPE_IMMEDIATE:
1257 k = csr1212_new_immediate(CSR1212_KV_KEY_ID(ki),
1258 CSR1212_KV_VAL(ki));
1260 ret = CSR1212_ENOMEM;
1264 k->refcnt = 0; /* Don't keep local reference when parsing. */
1267 case CSR1212_KV_TYPE_CSR_OFFSET:
1268 k = csr1212_new_csr_offset(CSR1212_KV_KEY_ID(ki),
1269 CSR1212_KV_VAL(ki));
1271 ret = CSR1212_ENOMEM;
1274 k->refcnt = 0; /* Don't keep local reference when parsing. */
1278 /* Compute the offset from 0xffff f000 0000. */
1279 offset = quads_to_bytes(CSR1212_KV_VAL(ki)) + kv_pos;
1280 if (offset == kv_pos) {
1281 /* Uh-oh. Can't have a relative offset of 0 for Leaves
1282 * or Directories. The Config ROM image is most likely
1283 * messed up, so we'll just abort here. */
1288 k = csr1212_find_keyval_offset(cache->layout_head, offset);
1291 break; /* Found it. */
1293 if (CSR1212_KV_KEY_TYPE(ki) == CSR1212_KV_TYPE_DIRECTORY) {
1294 k = csr1212_new_directory(CSR1212_KV_KEY_ID(ki));
1296 k = csr1212_new_leaf(CSR1212_KV_KEY_ID(ki), NULL, 0);
1299 ret = CSR1212_ENOMEM;
1302 k->refcnt = 0; /* Don't keep local reference when parsing. */
1303 k->valid = 0; /* Contents not read yet so it's not valid. */
1306 k->prev = cache->layout_tail;
1308 if (cache->layout_tail)
1309 cache->layout_tail->next = k;
1310 cache->layout_tail = k;
1312 ret = csr1212_attach_keyval_to_directory(dir, k);
1315 if (ret != CSR1212_SUCCESS) {
1322 int csr1212_parse_keyval(struct csr1212_keyval *kv,
1323 struct csr1212_csr_rom_cache *cache)
1325 struct csr1212_keyval_img *kvi;
1327 int ret = CSR1212_SUCCESS;
1330 kvi = (struct csr1212_keyval_img*)&cache->data[bytes_to_quads(kv->offset -
1332 kvi_len = CSR1212_BE16_TO_CPU(kvi->length);
1335 /* Apparently there are too many differnt wrong implementations of the
1336 * CRC algorithm that verifying them is moot. */
1337 if ((csr1212_crc16(kvi->data, kvi_len) != kvi->crc) &&
1338 (csr1212_msft_crc16(kvi->data, kvi_len) != kvi->crc)) {
1339 ret = CSR1212_EINVAL;
1344 switch(kv->key.type) {
1345 case CSR1212_KV_TYPE_DIRECTORY:
1346 for (i = 0; i < kvi_len; i++) {
1347 csr1212_quad_t ki = kvi->data[i];
1349 /* Some devices put null entries in their unit
1350 * directories. If we come across such and entry,
1354 ret = csr1212_parse_dir_entry(kv, ki,
1356 quads_to_bytes(i + 1)),
1359 kv->value.directory.len = kvi_len;
1362 case CSR1212_KV_TYPE_LEAF:
1363 if (kv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
1364 kv->value.leaf.data = cache->data;
1366 kv->value.leaf.data = CSR1212_MALLOC(quads_to_bytes(kvi_len));
1367 if (!kv->value.leaf.data)
1369 ret = CSR1212_ENOMEM;
1373 kv->value.leaf.len = kvi_len;
1374 memcpy(kv->value.leaf.data, kvi->data, quads_to_bytes(kvi_len));
/* Fetch a keyval's raw data from the device ROM (via csr->ops->bus_read)
 * into the appropriate cache, reading in csr->max_rom-sized chunks and
 * tracking which byte ranges of the cache are filled via a doubly-linked
 * list of csr1212_cache_region entries; finally parse the keyval.
 * Returns CSR1212_SUCCESS or a CSR1212_E* error code.
 * NOTE(review): this listing is non-contiguous -- original source lines
 * between the numbered statements are not visible here, so the code is
 * annotated in place without modification. */
1386 int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
1388 struct csr1212_cache_region *cr, *ncr, *newcr = NULL;
1389 struct csr1212_keyval_img *kvi = NULL;
1390 struct csr1212_csr_rom_cache *cache;
1393 u_int32_t *cache_ptr;
1394 u_int16_t kv_len = 0;
1398 return CSR1212_EINVAL;
/* Step 1: find the cache whose [offset, offset+size) range contains this
 * keyval's offset. */
1400 /* First find which cache the data should be in (or go in if not read
1402 for (cache = csr->cache_head; cache; cache = cache->next) {
1403 if (kv->offset >= cache->offset &&
1404 kv->offset < (cache->offset + cache->size))
/* No existing cache covers the offset: only an Extended ROM leaf may
 * spawn a new cache.  Its length is read from the device (upper 16 bits
 * of the first quadlet) to size the new cache. */
1410 struct csr1212_csr_rom_cache *nc;
1412 /* Only create a new cache for Extended ROM leaves. */
1413 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
1414 return CSR1212_EINVAL;
1416 if (csr->ops->bus_read(csr,
1417 CSR1212_REGISTER_SPACE_BASE + kv->offset,
1418 sizeof(csr1212_quad_t), &q, csr->private)) {
1422 kv->value.leaf.len = quads_to_bytes(CSR1212_BE32_TO_CPU(q)>>16);
1424 nc = csr1212_rom_cache_malloc(kv->offset, kv->value.leaf.len);
1427 csr->cache_tail = nc;
/* Seed the new cache's filled-region list with the one quadlet read
 * above ([0, sizeof(quad))). */
1428 cache->filled_head =
1429 CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
1430 if (!cache->filled_head) {
1431 return CSR1212_ENOMEM;
1434 cache->filled_head->offset_start = 0;
1435 cache->filled_head->offset_end = sizeof(csr1212_quad_t);
1436 cache->filled_tail = cache->filled_head;
1437 cache->filled_head->next = NULL;
1438 cache->filled_head->prev = NULL;
/* cache_index: keyval's byte offset relative to this cache's start. */
1442 cache_index = kv->offset - cache->offset;
/* Step 2: scan the filled-region list.  Either the data is already
 * cached (set kvi/kv_len), or a new region is inserted -- aligned down
 * to a max_rom boundary -- from which reading will proceed. */
1444 /* Now search read portions of the cache to see if it is there. */
1445 for (cr = cache->filled_head; cr; cr = cr->next) {
1446 if (cache_index < cr->offset_start) {
1447 newcr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
1449 return CSR1212_ENOMEM;
1451 newcr->offset_start = cache_index & ~(csr->max_rom - 1);
1452 newcr->offset_end = newcr->offset_start;
1454 newcr->prev = cr->prev;
1458 } else if ((cache_index >= cr->offset_start) &&
1459 (cache_index < cr->offset_end)) {
/* Hit: header already cached; compute total length from the
 * keyval image's length field (+1 quadlet for the header,
 * presumably -- continuation line not visible). */
1460 kvi = (struct csr1212_keyval_img*)
1461 (&cache->data[bytes_to_quads(cache_index)]);
1462 kv_len = quads_to_bytes(CSR1212_BE16_TO_CPU(kvi->length) +
1465 } else if (cache_index == cr->offset_end)
/* Fell off the list: append a fresh region after the tail. */
1470 cr = cache->filled_tail;
1471 newcr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
1473 return CSR1212_ENOMEM;
1475 newcr->offset_start = cache_index & ~(csr->max_rom - 1);
1476 newcr->offset_end = newcr->offset_start;
1478 newcr->next = cr->next;
1481 cache->filled_tail = newcr;
/* Step 3: keep issuing max_rom-sized bus reads, extending
 * cr->offset_end, until the keyval header has been seen (kvi set) and
 * the whole kv_len bytes are in the cache. */
1484 while(!kvi || cr->offset_end < cache_index + kv_len) {
1485 cache_ptr = &cache->data[bytes_to_quads(cr->offset_end &
1486 ~(csr->max_rom - 1))];
1488 addr = (CSR1212_CSR_ARCH_REG_SPACE_BASE + cache->offset +
1489 cr->offset_end) & ~(csr->max_rom - 1);
1491 if (csr->ops->bus_read(csr, addr, csr->max_rom, cache_ptr,
1493 if (csr->max_rom == 4)
1494 /* We've got problems! */
/* Fall back: some devices advertise a max_rom they cannot
 * honor; retry with single-quadlet reads. */
1497 /* Apparently the max_rom value was a lie, set it to
1498 * do quadlet reads and try again. */
/* Round offset_end up to the next max_rom boundary just read. */
1503 cr->offset_end += csr->max_rom - (cr->offset_end &
1504 (csr->max_rom - 1));
/* Once the read has passed the header, latch kvi and kv_len. */
1506 if (!kvi && (cr->offset_end > cache_index)) {
1507 kvi = (struct csr1212_keyval_img*)
1508 (&cache->data[bytes_to_quads(cache_index)]);
1509 kv_len = quads_to_bytes(CSR1212_BE16_TO_CPU(kvi->length) +
/* Sanity check the claimed length against the cache bounds before any
 * further access -- prevents reading past the buffer. */
1513 if ((kv_len + (kv->offset - cache->offset)) > cache->size) {
1514 /* The Leaf or Directory claims its length extends
1515 * beyond the ConfigROM image region and thus beyond the
1516 * end of our cache region. Therefore, we abort now
1517 * rather than seg faulting later. */
/* Step 4: if this region now abuts the next one, merge them into a
 * single filled region and unlink/free cr. */
1523 if (ncr && (cr->offset_end >= ncr->offset_start)) {
1524 /* consolidate region entries */
1525 ncr->offset_start = cr->offset_start;
1528 cr->prev->next = cr->next;
1529 ncr->prev = cr->prev;
1530 if (cache->filled_head == cr)
1531 cache->filled_head = ncr;
/* Data is in the cache; hand off to the parser. */
1537 return csr1212_parse_keyval(kv, cache);
/* Top-level entry point: parse a device's entire config ROM.  Reads the
 * bus info block, determines the device's maximum read size (max_rom),
 * then seeds and fetches the root directory keyval.
 * NOTE(review): this listing is non-contiguous -- original source lines
 * between the numbered statements are not visible here, so the code is
 * annotated in place without modification. */
1542 int csr1212_parse_csr(struct csr1212_csr *csr)
/* Maps the get_max_rom() result (an index) to a read size in bytes;
 * index 0 (4-byte quadlet reads) is the conservative default. */
1544 static const int mr_map[] = { 4, 64, 1024, 0 };
1547 if (!csr || !csr->ops->bus_read)
1548 return CSR1212_EINVAL;
1550 ret = csr1212_parse_bus_info_block(csr);
1551 if (ret != CSR1212_SUCCESS)
/* If the bus driver cannot report a max read size, fall back to
 * quadlet reads; otherwise look the reported index up in mr_map. */
1554 if (!csr->ops->get_max_rom)
1555 csr->max_rom = mr_map[0]; /* default value */
1557 csr->max_rom = mr_map[csr->ops->get_max_rom(csr->bus_info_data,
/* The root keyval is the first (and so far only) entry in the head
 * cache's layout list; position it just past the bus info block
 * (continuation of the offset expression is not visible here). */
1560 csr->cache_head->layout_head = csr->root_kv;
1561 csr->cache_head->layout_tail = csr->root_kv;
1563 csr->root_kv->offset = (CSR1212_CONFIG_ROM_SPACE_BASE & 0xffff) +
/* Mark stale and fetch: csr1212_get_keyval triggers the actual ROM
 * read/parse of the root directory. */
1566 csr->root_kv->valid = 0;
1567 csr1212_get_keyval(csr, csr->root_kv);
1569 return CSR1212_SUCCESS;