This commit was manufactured by cvs2svn to create branch 'vserver'.
[linux-2.6.git] / drivers / infiniband / core / cache.c
1 /*
2  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  *
32  * $Id: cache.c 1349 2004-12-16 21:09:43Z roland $
33  */
34
35 #include <linux/version.h>
36 #include <linux/module.h>
37 #include <linux/errno.h>
38 #include <linux/slab.h>
39
40 #include "core_priv.h"
41
42 struct ib_pkey_cache {
43         int             table_len;
44         u16             table[0];
45 };
46
47 struct ib_gid_cache {
48         int             table_len;
49         union ib_gid    table[0];
50 };
51
/*
 * Deferred cache-refresh request.  Allocated with GFP_ATOMIC from the
 * event callback and queued via schedule_work(); freed by the work
 * handler (ib_cache_task) after the update runs.
 */
struct ib_update_work {
	struct work_struct work;
	struct ib_device  *device;
	u8                 port_num;	/* port whose tables need re-reading */
};
57
58 static inline int start_port(struct ib_device *device)
59 {
60         return device->node_type == IB_NODE_SWITCH ? 0 : 1;
61 }
62
63 static inline int end_port(struct ib_device *device)
64 {
65         return device->node_type == IB_NODE_SWITCH ? 0 : device->phys_port_cnt;
66 }
67
68 int ib_get_cached_gid(struct ib_device *device,
69                       u8                port_num,
70                       int               index,
71                       union ib_gid     *gid)
72 {
73         struct ib_gid_cache *cache;
74         unsigned long flags;
75         int ret = 0;
76
77         if (port_num < start_port(device) || port_num > end_port(device))
78                 return -EINVAL;
79
80         read_lock_irqsave(&device->cache.lock, flags);
81
82         cache = device->cache.gid_cache[port_num - start_port(device)];
83
84         if (index < 0 || index >= cache->table_len)
85                 ret = -EINVAL;
86         else
87                 *gid = cache->table[index];
88
89         read_unlock_irqrestore(&device->cache.lock, flags);
90
91         return ret;
92 }
93 EXPORT_SYMBOL(ib_get_cached_gid);
94
95 int ib_find_cached_gid(struct ib_device *device,
96                        union ib_gid     *gid,
97                        u8               *port_num,
98                        u16              *index)
99 {
100         struct ib_gid_cache *cache;
101         unsigned long flags;
102         int p, i;
103         int ret = -ENOENT;
104
105         *port_num = -1;
106         if (index)
107                 *index = -1;
108
109         read_lock_irqsave(&device->cache.lock, flags);
110
111         for (p = 0; p <= end_port(device) - start_port(device); ++p) {
112                 cache = device->cache.gid_cache[p];
113                 for (i = 0; i < cache->table_len; ++i) {
114                         if (!memcmp(gid, &cache->table[i], sizeof *gid)) {
115                                 *port_num = p;
116                                 if (index)
117                                         *index = i;
118                                 ret = 0;
119                                 goto found;
120                         }
121                 }
122         }
123 found:
124         read_unlock_irqrestore(&device->cache.lock, flags);
125
126         return ret;
127 }
128 EXPORT_SYMBOL(ib_find_cached_gid);
129
130 int ib_get_cached_pkey(struct ib_device *device,
131                        u8                port_num,
132                        int               index,
133                        u16              *pkey)
134 {
135         struct ib_pkey_cache *cache;
136         unsigned long flags;
137         int ret = 0;
138
139         if (port_num < start_port(device) || port_num > end_port(device))
140                 return -EINVAL;
141
142         read_lock_irqsave(&device->cache.lock, flags);
143
144         cache = device->cache.pkey_cache[port_num - start_port(device)];
145
146         if (index < 0 || index >= cache->table_len)
147                 ret = -EINVAL;
148         else
149                 *pkey = cache->table[index];
150
151         read_unlock_irqrestore(&device->cache.lock, flags);
152
153         return ret;
154 }
155 EXPORT_SYMBOL(ib_get_cached_pkey);
156
157 int ib_find_cached_pkey(struct ib_device *device,
158                         u8                port_num,
159                         u16               pkey,
160                         u16              *index)
161 {
162         struct ib_pkey_cache *cache;
163         unsigned long flags;
164         int i;
165         int ret = -ENOENT;
166
167         if (port_num < start_port(device) || port_num > end_port(device))
168                 return -EINVAL;
169
170         read_lock_irqsave(&device->cache.lock, flags);
171
172         cache = device->cache.pkey_cache[port_num - start_port(device)];
173
174         *index = -1;
175
176         for (i = 0; i < cache->table_len; ++i)
177                 if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
178                         *index = i;
179                         ret = 0;
180                         break;
181                 }
182
183         read_unlock_irqrestore(&device->cache.lock, flags);
184
185         return ret;
186 }
187 EXPORT_SYMBOL(ib_find_cached_pkey);
188
/*
 * Re-read one port's P_Key and GID tables from the hardware and swap
 * the new copies into the device cache.
 *
 * All queries run outside the lock into freshly allocated tables; the
 * write lock is held only for the pointer swap, so readers are never
 * blocked on hardware access.  On any failure the partial tables are
 * freed and the old cache is left in place.
 */
static void ib_cache_update(struct ib_device *device,
			    u8                port)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache       *gid_cache = NULL, *old_gid_cache;
	int                        i;
	int                        ret;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	/* Query port attributes first to learn the current table sizes. */
	ret = ib_query_port(device, port, tprops);
	if (ret) {
		printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
		       ret, device->name);
		goto err;
	}

	/* Single allocation: header plus pkey_tbl_len trailing entries. */
	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	gid_cache = kmalloc(sizeof *gid_cache + tprops->gid_tbl_len *
			    sizeof *gid_cache->table, GFP_KERNEL);
	if (!gid_cache)
		goto err;

	gid_cache->table_len = tprops->gid_tbl_len;

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	for (i = 0; i < gid_cache->table_len; ++i) {
		ret = ib_query_gid(device, port, i, gid_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	/* Swap the new tables in; readers use read_lock_irqsave. */
	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - start_port(device)];
	old_gid_cache  = device->cache.gid_cache [port - start_port(device)];

	device->cache.pkey_cache[port - start_port(device)] = pkey_cache;
	device->cache.gid_cache [port - start_port(device)] = gid_cache;

	write_unlock_irq(&device->cache.lock);

	/* Old tables can be freed only after the pointers are swapped. */
	kfree(old_pkey_cache);
	kfree(old_gid_cache);
	kfree(tprops);
	return;

err:
	/* kfree(NULL) is a no-op, so partially built state is safe here. */
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}
261
262 static void ib_cache_task(void *work_ptr)
263 {
264         struct ib_update_work *work = work_ptr;
265
266         ib_cache_update(work->device, work->port_num);
267         kfree(work);
268 }
269
270 static void ib_cache_event(struct ib_event_handler *handler,
271                            struct ib_event *event)
272 {
273         struct ib_update_work *work;
274
275         if (event->event == IB_EVENT_PORT_ERR    ||
276             event->event == IB_EVENT_PORT_ACTIVE ||
277             event->event == IB_EVENT_LID_CHANGE  ||
278             event->event == IB_EVENT_PKEY_CHANGE ||
279             event->event == IB_EVENT_SM_CHANGE) {
280                 work = kmalloc(sizeof *work, GFP_ATOMIC);
281                 if (work) {
282                         INIT_WORK(&work->work, ib_cache_task, work);
283                         work->device   = event->device;
284                         work->port_num = event->element.port_num;
285                         schedule_work(&work->work);
286                 }
287         }
288 }
289
290 static void ib_cache_setup_one(struct ib_device *device)
291 {
292         int p;
293
294         rwlock_init(&device->cache.lock);
295
296         device->cache.pkey_cache =
297                 kmalloc(sizeof *device->cache.pkey_cache *
298                         (end_port(device) - start_port(device) + 1), GFP_KERNEL);
299         device->cache.gid_cache =
300                 kmalloc(sizeof *device->cache.pkey_cache *
301                         (end_port(device) - start_port(device) + 1), GFP_KERNEL);
302
303         if (!device->cache.pkey_cache || !device->cache.gid_cache) {
304                 printk(KERN_WARNING "Couldn't allocate cache "
305                        "for %s\n", device->name);
306                 goto err;
307         }
308
309         for (p = 0; p <= end_port(device) - start_port(device); ++p) {
310                 device->cache.pkey_cache[p] = NULL;
311                 device->cache.gid_cache [p] = NULL;
312                 ib_cache_update(device, p + start_port(device));
313         }
314
315         INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
316                               device, ib_cache_event);
317         if (ib_register_event_handler(&device->cache.event_handler))
318                 goto err_cache;
319
320         return;
321
322 err_cache:
323         for (p = 0; p <= end_port(device) - start_port(device); ++p) {
324                 kfree(device->cache.pkey_cache[p]);
325                 kfree(device->cache.gid_cache[p]);
326         }
327
328 err:
329         kfree(device->cache.pkey_cache);
330         kfree(device->cache.gid_cache);
331 }
332
333 static void ib_cache_cleanup_one(struct ib_device *device)
334 {
335         int p;
336
337         ib_unregister_event_handler(&device->cache.event_handler);
338         flush_scheduled_work();
339
340         for (p = 0; p <= end_port(device) - start_port(device); ++p) {
341                 kfree(device->cache.pkey_cache[p]);
342                 kfree(device->cache.gid_cache[p]);
343         }
344
345         kfree(device->cache.pkey_cache);
346         kfree(device->cache.gid_cache);
347 }
348
/*
 * Core client registration: setup/teardown callbacks are invoked for
 * every IB device as it is added to or removed from the core.
 */
static struct ib_client cache_client = {
	.name   = "cache",
	.add    = ib_cache_setup_one,
	.remove = ib_cache_cleanup_one
};
354
355 int __init ib_cache_setup(void)
356 {
357         return ib_register_client(&cache_client);
358 }
359
/* Module exit: unregister the cache client, tearing down all device caches. */
void __exit ib_cache_cleanup(void)
{
	ib_unregister_client(&cache_client);
}