drivers/infiniband/hw/ipath/ipath_layer.c
1 /*
2  * Copyright (c) 2006 QLogic, Inc. All rights reserved.
3  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33
34 /*
35  * These are the routines used by layered drivers, currently just the
36  * layered ethernet driver and verbs layer.
37  */
38
39 #include <linux/io.h>
40 #include <linux/pci.h>
41 #include <asm/byteorder.h>
42
43 #include "ipath_kernel.h"
44 #include "ipath_layer.h"
45 #include "ipath_common.h"
46
47 /* Acquire before ipath_devs_lock. */
48 static DEFINE_MUTEX(ipath_layer_mutex);
49
50 static int ipath_verbs_registered;
51
52 u16 ipath_layer_rcv_opcode;
53
54 static int (*layer_intr)(void *, u32);
55 static int (*layer_rcv)(void *, void *, struct sk_buff *);
56 static int (*layer_rcv_lid)(void *, void *);
57 static int (*verbs_piobufavail)(void *);
58 static void (*verbs_rcv)(void *, void *, void *, u32);
59
60 static void *(*layer_add_one)(int, struct ipath_devdata *);
61 static void (*layer_remove_one)(void *);
62 static void *(*verbs_add_one)(int, struct ipath_devdata *);
63 static void (*verbs_remove_one)(void *);
64 static void (*verbs_timer_cb)(void *);
65
66 int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
67 {
68         int ret = -ENODEV;
69
70         if (dd->ipath_layer.l_arg && layer_intr)
71                 ret = layer_intr(dd->ipath_layer.l_arg, arg);
72
73         return ret;
74 }
75
76 int ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
77 {
78         int ret;
79
80         mutex_lock(&ipath_layer_mutex);
81
82         ret = __ipath_layer_intr(dd, arg);
83
84         mutex_unlock(&ipath_layer_mutex);
85
86         return ret;
87 }
88
89 int __ipath_layer_rcv(struct ipath_devdata *dd, void *hdr,
90                       struct sk_buff *skb)
91 {
92         int ret = -ENODEV;
93
94         if (dd->ipath_layer.l_arg && layer_rcv)
95                 ret = layer_rcv(dd->ipath_layer.l_arg, hdr, skb);
96
97         return ret;
98 }
99
100 int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
101 {
102         int ret = -ENODEV;
103
104         if (dd->ipath_layer.l_arg && layer_rcv_lid)
105                 ret = layer_rcv_lid(dd->ipath_layer.l_arg, hdr);
106
107         return ret;
108 }
109
110 int __ipath_verbs_piobufavail(struct ipath_devdata *dd)
111 {
112         int ret = -ENODEV;
113
114         if (dd->verbs_layer.l_arg && verbs_piobufavail)
115                 ret = verbs_piobufavail(dd->verbs_layer.l_arg);
116
117         return ret;
118 }
119
120 int __ipath_verbs_rcv(struct ipath_devdata *dd, void *rc, void *ebuf,
121                       u32 tlen)
122 {
123         int ret = -ENODEV;
124
125         if (dd->verbs_layer.l_arg && verbs_rcv) {
126                 verbs_rcv(dd->verbs_layer.l_arg, rc, ebuf, tlen);
127                 ret = 0;
128         }
129
130         return ret;
131 }
132
133 int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate)
134 {
135         u32 lstate;
136         int ret;
137
138         switch (newstate) {
139         case IPATH_IB_LINKDOWN:
140                 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
141                                     INFINIPATH_IBCC_LINKINITCMD_SHIFT);
142                 /* don't wait */
143                 ret = 0;
144                 goto bail;
145
146         case IPATH_IB_LINKDOWN_SLEEP:
147                 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
148                                     INFINIPATH_IBCC_LINKINITCMD_SHIFT);
149                 /* don't wait */
150                 ret = 0;
151                 goto bail;
152
153         case IPATH_IB_LINKDOWN_DISABLE:
154                 ipath_set_ib_lstate(dd,
155                                     INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
156                                     INFINIPATH_IBCC_LINKINITCMD_SHIFT);
157                 /* don't wait */
158                 ret = 0;
159                 goto bail;
160
161         case IPATH_IB_LINKINIT:
162                 if (dd->ipath_flags & IPATH_LINKINIT) {
163                         ret = 0;
164                         goto bail;
165                 }
166                 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
167                                     INFINIPATH_IBCC_LINKCMD_SHIFT);
168                 lstate = IPATH_LINKINIT;
169                 break;
170
171         case IPATH_IB_LINKARM:
172                 if (dd->ipath_flags & IPATH_LINKARMED) {
173                         ret = 0;
174                         goto bail;
175                 }
176                 if (!(dd->ipath_flags &
177                       (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
178                         ret = -EINVAL;
179                         goto bail;
180                 }
181                 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
182                                     INFINIPATH_IBCC_LINKCMD_SHIFT);
183                 /*
184                  * Since the port can transition to ACTIVE by receiving
185                  * a non VL 15 packet, wait for either state.
186                  */
187                 lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
188                 break;
189
190         case IPATH_IB_LINKACTIVE:
191                 if (dd->ipath_flags & IPATH_LINKACTIVE) {
192                         ret = 0;
193                         goto bail;
194                 }
195                 if (!(dd->ipath_flags & IPATH_LINKARMED)) {
196                         ret = -EINVAL;
197                         goto bail;
198                 }
199                 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
200                                     INFINIPATH_IBCC_LINKCMD_SHIFT);
201                 lstate = IPATH_LINKACTIVE;
202                 break;
203
204         default:
205                 ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
206                 ret = -EINVAL;
207                 goto bail;
208         }
209         ret = ipath_wait_linkstate(dd, lstate, 2000);
210
211 bail:
212         return ret;
213 }
214
215 EXPORT_SYMBOL_GPL(ipath_layer_set_linkstate);
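/*
 * Illustrative sketch (not part of the driver): how a caller such as an
 * SM agent might walk the link state machine above.  The ordering follows
 * the checks in ipath_layer_set_linkstate() (ARM requires INIT or ACTIVE,
 * ACTIVE requires ARMED); the error handling shown is only an assumption
 * about how a caller could react.
 *
 *	if (ipath_layer_set_linkstate(dd, IPATH_IB_LINKINIT))
 *		return;		// link did not reach INIT within the timeout
 *	if (ipath_layer_set_linkstate(dd, IPATH_IB_LINKARM))
 *		return;		// link did not reach ARMED (or ACTIVE)
 *	ipath_layer_set_linkstate(dd, IPATH_IB_LINKACTIVE);
 *
 * The waiting transitions block in ipath_wait_linkstate() with the 2000
 * timeout passed above; the LINKDOWN variants return immediately.
 */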
216
217 /**
218  * ipath_layer_set_mtu - set the MTU
219  * @dd: the infinipath device
220  * @arg: the new MTU
221  *
222  * We can handle "any" incoming size; the issue here is whether we
223  * need to restrict our outgoing size.  For now, we don't do any
224  * sanity checking on this, and we don't deal with what happens to
225  * programs that are already running when the size changes.
226  * NOTE: changing the MTU will usually cause the IBC to go back to
227  * link initialize (IPATH_IBSTATE_INIT) state...
228  */
229 int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg)
230 {
231         u32 piosize;
232         int changed = 0;
233         int ret;
234
235         /*
236          * mtu is IB data payload max.  It's the largest power of 2 less
237          * than piosize (or even larger, since it only really controls the
238          * largest we can receive; we can send the max of the mtu and
239          * piosize).  We check that it's one of the valid IB sizes.
240          */
241         if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
242             arg != 4096) {
243                 ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
244                 ret = -EINVAL;
245                 goto bail;
246         }
247         if (dd->ipath_ibmtu == arg) {
248                 ret = 0;        /* same as current */
249                 goto bail;
250         }
251
252         piosize = dd->ipath_ibmaxlen;
253         dd->ipath_ibmtu = arg;
254
255         if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
256                 /* Only if it's not the initial value (or reset to it) */
257                 if (piosize != dd->ipath_init_ibmaxlen) {
258                         dd->ipath_ibmaxlen = piosize;
259                         changed = 1;
260                 }
261         } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
262                 piosize = arg + IPATH_PIO_MAXIBHDR;
263                 ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
264                            "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
265                            arg);
266                 dd->ipath_ibmaxlen = piosize;
267                 changed = 1;
268         }
269
270         if (changed) {
271                 /*
272                  * set the IBC maxpktlength to the size of our pio
273                  * buffers in words
274                  */
275                 u64 ibc = dd->ipath_ibcctrl;
276                 ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
277                          INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
278
279                 piosize = piosize - 2 * sizeof(u32);    /* ignore pbc */
280                 dd->ipath_ibmaxlen = piosize;
281                 piosize /= sizeof(u32); /* in words */
282                 /*
283                  * add one word for the ICRC, which we only send in diag
284                  * test pkt mode; we don't need to worry about that for mtu
285                  */
286                 piosize += 1;
287
288                 ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
289                 dd->ipath_ibcctrl = ibc;
290                 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
291                                  dd->ipath_ibcctrl);
292                 dd->ipath_f_tidtemplate(dd);
293         }
294
295         ret = 0;
296
297 bail:
298         return ret;
299 }
300
301 EXPORT_SYMBOL_GPL(ipath_layer_set_mtu);
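/*
 * Worked example for the MAXPKTLEN arithmetic above (a sketch only; the
 * real value of IPATH_PIO_MAXIBHDR lives in ipath_kernel.h, and 128 is
 * merely an assumed figure for the arithmetic):
 *
 *	arg (MTU)                 = 2048 bytes
 *	piosize = arg + max hdr   = 2048 + 128 = 2176 bytes
 *	minus 2 dwords of PBC     = 2176 - 8   = 2168 bytes  (stored as ibmaxlen)
 *	in 32-bit words           = 2168 / 4   = 542
 *	plus 1 word for the ICRC  = 543        -> IBCC MAXPKTLEN field
 *
 * The updated value is then written to kr_ibcctrl and the TID template is
 * regenerated via dd->ipath_f_tidtemplate().
 */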
302
303 int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
304 {
305         dd->ipath_lid = arg;
306         dd->ipath_lmc = lmc;
307
308         mutex_lock(&ipath_layer_mutex);
309
310         if (dd->ipath_layer.l_arg && layer_intr)
311                 layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);
312
313         mutex_unlock(&ipath_layer_mutex);
314
315         return 0;
316 }
317
318 EXPORT_SYMBOL_GPL(ipath_set_lid);
319
320 int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
321 {
322         /* XXX - need to inform anyone who cares this just happened. */
323         dd->ipath_guid = guid;
324         return 0;
325 }
326
327 EXPORT_SYMBOL_GPL(ipath_layer_set_guid);
328
329 __be64 ipath_layer_get_guid(struct ipath_devdata *dd)
330 {
331         return dd->ipath_guid;
332 }
333
334 EXPORT_SYMBOL_GPL(ipath_layer_get_guid);
335
336 u32 ipath_layer_get_nguid(struct ipath_devdata *dd)
337 {
338         return dd->ipath_nguid;
339 }
340
341 EXPORT_SYMBOL_GPL(ipath_layer_get_nguid);
342
343 u32 ipath_layer_get_majrev(struct ipath_devdata *dd)
344 {
345         return dd->ipath_majrev;
346 }
347
348 EXPORT_SYMBOL_GPL(ipath_layer_get_majrev);
349
350 u32 ipath_layer_get_minrev(struct ipath_devdata *dd)
351 {
352         return dd->ipath_minrev;
353 }
354
355 EXPORT_SYMBOL_GPL(ipath_layer_get_minrev);
356
357 u32 ipath_layer_get_pcirev(struct ipath_devdata *dd)
358 {
359         return dd->ipath_pcirev;
360 }
361
362 EXPORT_SYMBOL_GPL(ipath_layer_get_pcirev);
363
364 u32 ipath_layer_get_flags(struct ipath_devdata *dd)
365 {
366         return dd->ipath_flags;
367 }
368
369 EXPORT_SYMBOL_GPL(ipath_layer_get_flags);
370
371 struct device *ipath_layer_get_device(struct ipath_devdata *dd)
372 {
373         return &dd->pcidev->dev;
374 }
375
376 EXPORT_SYMBOL_GPL(ipath_layer_get_device);
377
378 u16 ipath_layer_get_deviceid(struct ipath_devdata *dd)
379 {
380         return dd->ipath_deviceid;
381 }
382
383 EXPORT_SYMBOL_GPL(ipath_layer_get_deviceid);
384
385 u32 ipath_layer_get_vendorid(struct ipath_devdata *dd)
386 {
387         return dd->ipath_vendorid;
388 }
389
390 EXPORT_SYMBOL_GPL(ipath_layer_get_vendorid);
391
392 u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
393 {
394         return dd->ipath_lastibcstat;
395 }
396
397 EXPORT_SYMBOL_GPL(ipath_layer_get_lastibcstat);
398
399 u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd)
400 {
401         return dd->ipath_ibmtu;
402 }
403
404 EXPORT_SYMBOL_GPL(ipath_layer_get_ibmtu);
405
406 void ipath_layer_add(struct ipath_devdata *dd)
407 {
408         mutex_lock(&ipath_layer_mutex);
409
410         if (layer_add_one)
411                 dd->ipath_layer.l_arg =
412                         layer_add_one(dd->ipath_unit, dd);
413
414         if (verbs_add_one)
415                 dd->verbs_layer.l_arg =
416                         verbs_add_one(dd->ipath_unit, dd);
417
418         mutex_unlock(&ipath_layer_mutex);
419 }
420
421 void ipath_layer_remove(struct ipath_devdata *dd)
422 {
423         mutex_lock(&ipath_layer_mutex);
424
425         if (dd->ipath_layer.l_arg && layer_remove_one) {
426                 layer_remove_one(dd->ipath_layer.l_arg);
427                 dd->ipath_layer.l_arg = NULL;
428         }
429
430         if (dd->verbs_layer.l_arg && verbs_remove_one) {
431                 verbs_remove_one(dd->verbs_layer.l_arg);
432                 dd->verbs_layer.l_arg = NULL;
433         }
434
435         mutex_unlock(&ipath_layer_mutex);
436 }
437
438 int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
439                          void (*l_remove)(void *),
440                          int (*l_intr)(void *, u32),
441                          int (*l_rcv)(void *, void *, struct sk_buff *),
442                          u16 l_rcv_opcode,
443                          int (*l_rcv_lid)(void *, void *))
444 {
445         struct ipath_devdata *dd, *tmp;
446         unsigned long flags;
447
448         mutex_lock(&ipath_layer_mutex);
449
450         layer_add_one = l_add;
451         layer_remove_one = l_remove;
452         layer_intr = l_intr;
453         layer_rcv = l_rcv;
454         layer_rcv_lid = l_rcv_lid;
455         ipath_layer_rcv_opcode = l_rcv_opcode;
456
457         spin_lock_irqsave(&ipath_devs_lock, flags);
458
459         list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
460                 if (!(dd->ipath_flags & IPATH_INITTED))
461                         continue;
462
463                 if (dd->ipath_layer.l_arg)
464                         continue;
465
466                 if (!(*dd->ipath_statusp & IPATH_STATUS_SMA))
467                         *dd->ipath_statusp |= IPATH_STATUS_OIB_SMA;
468
469                 spin_unlock_irqrestore(&ipath_devs_lock, flags);
470                 dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
471                 spin_lock_irqsave(&ipath_devs_lock, flags);
472         }
473
474         spin_unlock_irqrestore(&ipath_devs_lock, flags);
475         mutex_unlock(&ipath_layer_mutex);
476
477         return 0;
478 }
479
480 EXPORT_SYMBOL_GPL(ipath_layer_register);
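/*
 * Illustrative sketch (hypothetical, not part of this file): the shape of
 * the callbacks a layered driver such as the ipath ethernet driver passes
 * to ipath_layer_register().  The my_* names and MY_ETH_OPCODE are made up
 * for the example; only the prototypes are dictated by the API above.
 *
 *	static void *my_add(int unit, struct ipath_devdata *dd)
 *	{ return alloc_my_per_device_state(unit, dd); }	// becomes l_arg
 *
 *	static void my_remove(void *arg) { free_my_per_device_state(arg); }
 *	static int my_intr(void *arg, u32 events) { ... return 0; }
 *	static int my_rcv(void *arg, void *hdr, struct sk_buff *skb)
 *	{ ... return 0; }
 *	static int my_rcv_lid(void *arg, void *hdr) { ... return 0; }
 *
 *	ipath_layer_register(my_add, my_remove, my_intr, my_rcv,
 *			     MY_ETH_OPCODE, my_rcv_lid);
 *
 * Registration immediately calls my_add() for every device that is already
 * IPATH_INITTED, so the callbacks must be ready before the call is made.
 */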
481
482 void ipath_layer_unregister(void)
483 {
484         struct ipath_devdata *dd, *tmp;
485         unsigned long flags;
486
487         mutex_lock(&ipath_layer_mutex);
488         spin_lock_irqsave(&ipath_devs_lock, flags);
489
490         list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
491                 if (dd->ipath_layer.l_arg && layer_remove_one) {
492                         spin_unlock_irqrestore(&ipath_devs_lock, flags);
493                         layer_remove_one(dd->ipath_layer.l_arg);
494                         spin_lock_irqsave(&ipath_devs_lock, flags);
495                         dd->ipath_layer.l_arg = NULL;
496                 }
497         }
498
499         spin_unlock_irqrestore(&ipath_devs_lock, flags);
500
501         layer_add_one = NULL;
502         layer_remove_one = NULL;
503         layer_intr = NULL;
504         layer_rcv = NULL;
505         layer_rcv_lid = NULL;
506
507         mutex_unlock(&ipath_layer_mutex);
508 }
509
510 EXPORT_SYMBOL_GPL(ipath_layer_unregister);
511
512 static void __ipath_verbs_timer(unsigned long arg)
513 {
514         struct ipath_devdata *dd = (struct ipath_devdata *) arg;
515
516         /*
517          * If port 0 receive packet interrupts are not available, or
518          * can be missed, poll the receive queue
519          */
520         if (dd->ipath_flags & IPATH_POLL_RX_INTR)
521                 ipath_kreceive(dd);
522
523         /* Handle verbs layer timeouts. */
524         if (dd->verbs_layer.l_arg && verbs_timer_cb)
525                 verbs_timer_cb(dd->verbs_layer.l_arg);
526
527         mod_timer(&dd->verbs_layer.l_timer, jiffies + 1);
528 }
529
530 /**
531  * ipath_verbs_register - verbs layer registration
532  * @l_piobufavail: callback for when PIO buffers become available
533  * @l_rcv: callback for receiving a packet
534  * @l_timer_cb: timer callback
535  * @l_add: per-device add callback, @l_remove: per-device remove callback
536  */
537 int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *),
538                          void (*l_remove)(void *arg),
539                          int (*l_piobufavail) (void *arg),
540                          void (*l_rcv) (void *arg, void *rhdr,
541                                         void *data, u32 tlen),
542                          void (*l_timer_cb) (void *arg))
543 {
544         struct ipath_devdata *dd, *tmp;
545         unsigned long flags;
546
547         mutex_lock(&ipath_layer_mutex);
548
549         verbs_add_one = l_add;
550         verbs_remove_one = l_remove;
551         verbs_piobufavail = l_piobufavail;
552         verbs_rcv = l_rcv;
553         verbs_timer_cb = l_timer_cb;
554
555         spin_lock_irqsave(&ipath_devs_lock, flags);
556
557         list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
558                 if (!(dd->ipath_flags & IPATH_INITTED))
559                         continue;
560
561                 if (dd->verbs_layer.l_arg)
562                         continue;
563
564                 spin_unlock_irqrestore(&ipath_devs_lock, flags);
565                 dd->verbs_layer.l_arg = l_add(dd->ipath_unit, dd);
566                 spin_lock_irqsave(&ipath_devs_lock, flags);
567         }
568
569         spin_unlock_irqrestore(&ipath_devs_lock, flags);
570         mutex_unlock(&ipath_layer_mutex);
571
572         ipath_verbs_registered = 1;
573
574         return 0;
575 }
576
577 EXPORT_SYMBOL_GPL(ipath_verbs_register);
578
579 void ipath_verbs_unregister(void)
580 {
581         struct ipath_devdata *dd, *tmp;
582         unsigned long flags;
583
584         mutex_lock(&ipath_layer_mutex);
585         spin_lock_irqsave(&ipath_devs_lock, flags);
586
587         list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
588                 *dd->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
589
590                 if (dd->verbs_layer.l_arg && verbs_remove_one) {
591                         spin_unlock_irqrestore(&ipath_devs_lock, flags);
592                         verbs_remove_one(dd->verbs_layer.l_arg);
593                         spin_lock_irqsave(&ipath_devs_lock, flags);
594                         dd->verbs_layer.l_arg = NULL;
595                 }
596         }
597
598         spin_unlock_irqrestore(&ipath_devs_lock, flags);
599
600         verbs_add_one = NULL;
601         verbs_remove_one = NULL;
602         verbs_piobufavail = NULL;
603         verbs_rcv = NULL;
604         verbs_timer_cb = NULL;
605
606         ipath_verbs_registered = 0;
607
608         mutex_unlock(&ipath_layer_mutex);
609 }
610
611 EXPORT_SYMBOL_GPL(ipath_verbs_unregister);
612
613 int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
614 {
615         int ret;
616         u32 intval = 0;
617
618         mutex_lock(&ipath_layer_mutex);
619
620         if (!dd->ipath_layer.l_arg) {
621                 ret = -EINVAL;
622                 goto bail;
623         }
624
625         ret = ipath_setrcvhdrsize(dd, IPATH_HEADER_QUEUE_WORDS);
626
627         if (ret < 0)
628                 goto bail;
629
630         *pktmax = dd->ipath_ibmaxlen;
631
632         if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
633                 intval |= IPATH_LAYER_INT_IF_UP;
634         if (dd->ipath_lid)
635                 intval |= IPATH_LAYER_INT_LID;
636         if (dd->ipath_mlid)
637                 intval |= IPATH_LAYER_INT_BCAST;
638         /*
639          * Do this on open, in case the low level is already up and
640          * just the layered driver was reloaded, etc.
641          */
642         if (intval)
643                 layer_intr(dd->ipath_layer.l_arg, intval);
644
645         ret = 0;
646 bail:
647         mutex_unlock(&ipath_layer_mutex);
648
649         return ret;
650 }
651
652 EXPORT_SYMBOL_GPL(ipath_layer_open);
653
654 u16 ipath_layer_get_lid(struct ipath_devdata *dd)
655 {
656         return dd->ipath_lid;
657 }
658
659 EXPORT_SYMBOL_GPL(ipath_layer_get_lid);
660
661 /**
662  * ipath_layer_get_mac - get the MAC address
663  * @dd: the infinipath device
664  * @mac: the MAC is put here
665  *
666  * This is the EUI-64 OUI octets (top 3), then
667  * skip the next 2 (which should both be zero or 0xff).
668  * The returned MAC is in network order.
669  * mac points to at least 6 bytes of buffer.
670  * We assume that by the time the LID is set, the GUID is as valid
671  * as it's ever going to be, rather than adding yet another status bit.
672  */
673
674 int ipath_layer_get_mac(struct ipath_devdata *dd, u8 * mac)
675 {
676         u8 *guid;
677
678         guid = (u8 *) &dd->ipath_guid;
679
680         mac[0] = guid[0];
681         mac[1] = guid[1];
682         mac[2] = guid[2];
683         mac[3] = guid[5];
684         mac[4] = guid[6];
685         mac[5] = guid[7];
686         if ((guid[3] || guid[4]) && !(guid[3] == 0xff && guid[4] == 0xff))
687                 ipath_dbg("Warning, guid bytes 3 and 4 not 0 or 0xffff: "
688                           "%x %x\n", guid[3], guid[4]);
689         return 0;
690 }
691
692 EXPORT_SYMBOL_GPL(ipath_layer_get_mac);
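/*
 * Worked example for the GUID-to-MAC mapping above (the GUID value is made
 * up): a port GUID of 00:11:75:ff:ff:aa:bb:cc (network order, guid[0] to
 * guid[7]) yields the MAC 00:11:75:aa:bb:cc -- bytes 0-2 are the OUI,
 * bytes 3-4 (here 0xff 0xff) are dropped, and bytes 5-7 supply the
 * NIC-specific part.  Anything other than 00:00 or ff:ff in bytes 3-4
 * only produces the debug warning; the MAC is still formed the same way.
 */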
693
694 u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
695 {
696         return dd->ipath_mlid;
697 }
698
699 EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);
700
701 u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd)
702 {
703         return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
704 }
705
706 EXPORT_SYMBOL_GPL(ipath_layer_get_cr_errpkey);
707
708 static void update_sge(struct ipath_sge_state *ss, u32 length)
709 {
710         struct ipath_sge *sge = &ss->sge;
711
712         sge->vaddr += length;
713         sge->length -= length;
714         sge->sge_length -= length;
715         if (sge->sge_length == 0) {
716                 if (--ss->num_sge)
717                         *sge = *ss->sg_list++;
718         } else if (sge->length == 0 && sge->mr != NULL) {
719                 if (++sge->n >= IPATH_SEGSZ) {
720                         if (++sge->m >= sge->mr->mapsz)
721                                 return;
722                         sge->n = 0;
723                 }
724                 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
725                 sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
726         }
727 }
728
729 #ifdef __LITTLE_ENDIAN
730 static inline u32 get_upper_bits(u32 data, u32 shift)
731 {
732         return data >> shift;
733 }
734
735 static inline u32 set_upper_bits(u32 data, u32 shift)
736 {
737         return data << shift;
738 }
739
740 static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
741 {
742         data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
743         data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
744         return data;
745 }
746 #else
747 static inline u32 get_upper_bits(u32 data, u32 shift)
748 {
749         return data << shift;
750 }
751
752 static inline u32 set_upper_bits(u32 data, u32 shift)
753 {
754         return data >> shift;
755 }
756
757 static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
758 {
759         data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
760         data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
761         return data;
762 }
763 #endif
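/*
 * Worked example for the little-endian helpers above (values are only for
 * illustration).  With the source dword holding bytes AA BB CC DD at
 * increasing addresses (so data == 0xDDCCBBAA on a little-endian host),
 * clear_upper_bytes(data, 2, 1) does:
 *
 *	data << (4 - 2) * 8     = 0xBBAA0000
 *	     >> (4 - 2 - 1) * 8 = 0x00BBAA00
 *
 * i.e. the low n = 2 source bytes AA BB land at byte offset off = 1 of the
 * result and every other byte is cleared -- exactly what copy_io() needs
 * when it splices a partial source word into a partially filled output
 * word.  The big-endian variants mirror the shifts so the same
 * memory-order result is produced.
 */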
764
765 static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
766                     u32 length)
767 {
768         u32 extra = 0;
769         u32 data = 0;
770         u32 last;
771
772         while (1) {
773                 u32 len = ss->sge.length;
774                 u32 off;
775
776                 BUG_ON(len == 0);
777                 if (len > length)
778                         len = length;
779                 if (len > ss->sge.sge_length)
780                         len = ss->sge.sge_length;
781                 /* If the source address is not aligned, try to align it. */
782                 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
783                 if (off) {
784                         u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
785                                             ~(sizeof(u32) - 1));
786                         u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
787                         u32 y;
788
789                         y = sizeof(u32) - off;
790                         if (len > y)
791                                 len = y;
792                         if (len + extra >= sizeof(u32)) {
793                                 data |= set_upper_bits(v, extra *
794                                                        BITS_PER_BYTE);
795                                 len = sizeof(u32) - extra;
796                                 if (len == length) {
797                                         last = data;
798                                         break;
799                                 }
800                                 __raw_writel(data, piobuf);
801                                 piobuf++;
802                                 extra = 0;
803                                 data = 0;
804                         } else {
805                                 /* Clear unused upper bytes */
806                                 data |= clear_upper_bytes(v, len, extra);
807                                 if (len == length) {
808                                         last = data;
809                                         break;
810                                 }
811                                 extra += len;
812                         }
813                 } else if (extra) {
814                         /* Source address is aligned. */
815                         u32 *addr = (u32 *) ss->sge.vaddr;
816                         int shift = extra * BITS_PER_BYTE;
817                         int ushift = 32 - shift;
818                         u32 l = len;
819
820                         while (l >= sizeof(u32)) {
821                                 u32 v = *addr;
822
823                                 data |= set_upper_bits(v, shift);
824                                 __raw_writel(data, piobuf);
825                                 data = get_upper_bits(v, ushift);
826                                 piobuf++;
827                                 addr++;
828                                 l -= sizeof(u32);
829                         }
830                         /*
831                          * We still have 'extra' number of bytes leftover.
832                          */
833                         if (l) {
834                                 u32 v = *addr;
835
836                                 if (l + extra >= sizeof(u32)) {
837                                         data |= set_upper_bits(v, shift);
838                                         len -= l + extra - sizeof(u32);
839                                         if (len == length) {
840                                                 last = data;
841                                                 break;
842                                         }
843                                         __raw_writel(data, piobuf);
844                                         piobuf++;
845                                         extra = 0;
846                                         data = 0;
847                                 } else {
848                                         /* Clear unused upper bytes */
849                                         data |= clear_upper_bytes(v, l,
850                                                                   extra);
851                                         if (len == length) {
852                                                 last = data;
853                                                 break;
854                                         }
855                                         extra += l;
856                                 }
857                         } else if (len == length) {
858                                 last = data;
859                                 break;
860                         }
861                 } else if (len == length) {
862                         u32 w;
863
864                         /*
865                          * Need to round up for the last dword in the
866                          * packet.
867                          */
868                         w = (len + 3) >> 2;
869                         __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
870                         piobuf += w - 1;
871                         last = ((u32 *) ss->sge.vaddr)[w - 1];
872                         break;
873                 } else {
874                         u32 w = len >> 2;
875
876                         __iowrite32_copy(piobuf, ss->sge.vaddr, w);
877                         piobuf += w;
878
879                         extra = len & (sizeof(u32) - 1);
880                         if (extra) {
881                                 u32 v = ((u32 *) ss->sge.vaddr)[w];
882
883                                 /* Clear unused upper bytes */
884                                 data = clear_upper_bytes(v, extra, 0);
885                         }
886                 }
887                 update_sge(ss, len);
888                 length -= len;
889         }
890         /* Update address before sending packet. */
891         update_sge(ss, length);
892         /* must flush early everything before trigger word */
893         ipath_flush_wc();
894         __raw_writel(last, piobuf);
895         /* be sure trigger word is written */
896         ipath_flush_wc();
897 }
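/*
 * One possible walk-through of the byte-carry in copy_io() above, assuming
 * both SGEs start word-aligned and the host is little-endian (the buffer
 * contents are made up):
 *
 *   SGE 0: 5 bytes "A B C D E"      SGE 1: 3 bytes "F G H"      length = 8
 *
 *   pass 1: off == 0, extra == 0 and len (5) != remaining length (8), so
 *           the bulk-copy branch at the bottom runs: one full dword
 *           "A B C D" goes out via __iowrite32_copy(), extra becomes 1 and
 *           data holds "E" (clear_upper_bytes of the trailing byte).
 *   pass 2: extra != 0, so the carry branch runs: "F G H" is merged into
 *           data with set_upper_bits(); the remaining length is now
 *           consumed, so data ("E F G H") becomes 'last' and the loop ends.
 *   exit:   ipath_flush_wc(), then 'last' is written as the trigger word.
 *
 * The point of the dance is that the PIO buffer only ever sees whole,
 * correctly packed dwords, with the final dword written last.
 */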
898
899 /**
900  * ipath_verbs_send - send a packet from the verbs layer
901  * @dd: the infinipath device
902  * @hdrwords: the number of words in the header
903  * @hdr: the packet header
904  * @len: the length of the packet in bytes
905  * @ss: the SGE to send
906  *
907  * This is like ipath_sma_send_pkt() in that we need to be able to send
908  * packets after the chip is initialized (MADs) but also like
909  * ipath_layer_send_hdr() since it's used by the verbs layer.
910  */
911 int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
912                      u32 *hdr, u32 len, struct ipath_sge_state *ss)
913 {
914         u32 __iomem *piobuf;
915         u32 plen;
916         int ret;
917
918         /* +1 is for the qword padding of pbc */
919         plen = hdrwords + ((len + 3) >> 2) + 1;
920         if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
921                 ipath_dbg("packet len 0x%x too long, failing\n", plen);
922                 ret = -EINVAL;
923                 goto bail;
924         }
925
926         /* Get a PIO buffer to use. */
927         piobuf = ipath_getpiobuf(dd, NULL);
928         if (unlikely(piobuf == NULL)) {
929                 ret = -EBUSY;
930                 goto bail;
931         }
932
933         /*
934          * Write len to control qword, no flags.
935          * We have to flush after the PBC for correctness on some cpus
936          * or the WC buffer can be written out of order.
937          */
938         writeq(plen, piobuf);
939         ipath_flush_wc();
940         piobuf += 2;
941         if (len == 0) {
942                 /*
943                  * If there is just the header portion, must flush before
944                  * writing last word of header for correctness, and after
945                  * the last header word (trigger word).
946                  */
947                 __iowrite32_copy(piobuf, hdr, hdrwords - 1);
948                 ipath_flush_wc();
949                 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
950                 ipath_flush_wc();
951                 ret = 0;
952                 goto bail;
953         }
954
955         __iowrite32_copy(piobuf, hdr, hdrwords);
956         piobuf += hdrwords;
957
958         /* The common case is aligned and contained in one segment. */
959         if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
960                    !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
961                 u32 w;
962                 u32 *addr = (u32 *) ss->sge.vaddr;
963
964                 /* Update address before sending packet. */
965                 update_sge(ss, len);
966                 /* Need to round up for the last dword in the packet. */
967                 w = (len + 3) >> 2;
968                 __iowrite32_copy(piobuf, addr, w - 1);
969                 /* must flush early everything before trigger word */
970                 ipath_flush_wc();
971                 __raw_writel(addr[w - 1], piobuf + w - 1);
972                 /* be sure trigger word is written */
973                 ipath_flush_wc();
974                 ret = 0;
975                 goto bail;
976         }
977         copy_io(piobuf, ss, len);
978         ret = 0;
979
980 bail:
981         return ret;
982 }
983
984 EXPORT_SYMBOL_GPL(ipath_verbs_send);
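/*
 * Worked example for the length check in ipath_verbs_send() (the numbers
 * are only illustrative): for a send with hdrwords = 7 (28 bytes of
 * LRH/BTH/DETH, i.e. a UD header without a GRH) and a 256 byte payload,
 *
 *	plen = 7 + ((256 + 3) >> 2) + 1 = 7 + 64 + 1 = 72 dwords
 *
 * so plen << 2 = 288 bytes is what gets compared against ipath_ibmaxlen;
 * the +1 is the qword padding of the PBC that is written with writeq()
 * before the header words.
 */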
985
986 int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
987                                   u64 *rwords, u64 *spkts, u64 *rpkts,
988                                   u64 *xmit_wait)
989 {
990         int ret;
991
992         if (!(dd->ipath_flags & IPATH_INITTED)) {
993                 /* no hardware, freeze, etc. */
994                 ipath_dbg("unit %u not usable\n", dd->ipath_unit);
995                 ret = -EINVAL;
996                 goto bail;
997         }
998         *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
999         *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
1000         *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
1001         *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
1002         *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
1003
1004         ret = 0;
1005
1006 bail:
1007         return ret;
1008 }
1009
1010 EXPORT_SYMBOL_GPL(ipath_layer_snapshot_counters);
1011
1012 /**
1013  * ipath_layer_get_counters - get various chip counters
1014  * @dd: the infinipath device
1015  * @cntrs: counters are placed here
1016  *
1017  * Return the counters needed by recv_pma_get_portcounters().
1018  */
1019 int ipath_layer_get_counters(struct ipath_devdata *dd,
1020                               struct ipath_layer_counters *cntrs)
1021 {
1022         int ret;
1023
1024         if (!(dd->ipath_flags & IPATH_INITTED)) {
1025                 /* no hardware, freeze, etc. */
1026                 ipath_dbg("unit %u not usable\n", dd->ipath_unit);
1027                 ret = -EINVAL;
1028                 goto bail;
1029         }
1030         cntrs->symbol_error_counter =
1031                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
1032         cntrs->link_error_recovery_counter =
1033                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
1034         /*
1035          * The link downed counter counts when the other side downs the
1036          * connection.  We add in the number of times we downed the link
1037          * due to local link integrity errors to compensate.
1038          */
1039         cntrs->link_downed_counter =
1040                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
1041         cntrs->port_rcv_errors =
1042                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
1043                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
1044                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
1045                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
1046                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
1047                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
1048                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
1049                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
1050                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
1051         cntrs->port_rcv_remphys_errors =
1052                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
1053         cntrs->port_xmit_discards =
1054                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
1055         cntrs->port_xmit_data =
1056                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
1057         cntrs->port_rcv_data =
1058                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
1059         cntrs->port_xmit_packets =
1060                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
1061         cntrs->port_rcv_packets =
1062                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
1063         cntrs->local_link_integrity_errors = dd->ipath_lli_errors;
1064         cntrs->excessive_buffer_overrun_errors = 0; /* XXX */
1065
1066         ret = 0;
1067
1068 bail:
1069         return ret;
1070 }
1071
1072 EXPORT_SYMBOL_GPL(ipath_layer_get_counters);
1073
1074 int ipath_layer_want_buffer(struct ipath_devdata *dd)
1075 {
1076         set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
1077         ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1078                          dd->ipath_sendctrl);
1079
1080         return 0;
1081 }
1082
1083 EXPORT_SYMBOL_GPL(ipath_layer_want_buffer);
1084
1085 int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
1086 {
1087         int ret = 0;
1088         u32 __iomem *piobuf;
1089         u32 plen, *uhdr;
1090         size_t count;
1091         __be16 vlsllnh;
1092
1093         if (!(dd->ipath_flags & IPATH_RCVHDRSZ_SET)) {
1094                 ipath_dbg("send while not open\n");
1095                 ret = -EINVAL;
1096         } else
1097                 if ((dd->ipath_flags & (IPATH_LINKUNK | IPATH_LINKDOWN)) ||
1098                     dd->ipath_lid == 0) {
1099                         /*
1100                          * the LID check is for when the SMA hasn't yet configured it
1101                          */
1102                         ret = -ENETDOWN;
1103                         ipath_cdbg(VERBOSE, "send while not ready, "
1104                                    "mylid=%u, flags=0x%x\n",
1105                                    dd->ipath_lid, dd->ipath_flags);
1106                 }
1107
1108         vlsllnh = *((__be16 *) hdr);
1109         if (vlsllnh != htons(IPATH_LRH_BTH)) {
1110                 ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
1111                           "not sending\n", be16_to_cpu(vlsllnh),
1112                           IPATH_LRH_BTH);
1113                 ret = -EINVAL;
1114         }
1115         if (ret)
1116                 goto done;
1117
1118         /* Get a PIO buffer to use. */
1119         piobuf = ipath_getpiobuf(dd, NULL);
1120         if (piobuf == NULL) {
1121                 ret = -EBUSY;
1122                 goto done;
1123         }
1124
1125         plen = (sizeof(*hdr) >> 2); /* actual length */
1126         ipath_cdbg(EPKT, "0x%x+1w pio %p\n", plen, piobuf);
1127
1128         writeq(plen+1, piobuf); /* len (+1 for pad) to pbc, no flags */
1129         ipath_flush_wc();
1130         piobuf += 2;
1131         uhdr = (u32 *)hdr;
1132         count = plen-1; /* amount we can copy before trigger word */
1133         __iowrite32_copy(piobuf, uhdr, count);
1134         ipath_flush_wc();
1135         __raw_writel(uhdr[count], piobuf + count);
1136         ipath_flush_wc(); /* ensure it's sent, now */
1137
1138         ipath_stats.sps_ether_spkts++;  /* ether packet sent */
1139
1140 done:
1141         return ret;
1142 }
1143
1144 EXPORT_SYMBOL_GPL(ipath_layer_send_hdr);
1145
1146 int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
1147 {
1148         set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
1149
1150         ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1151                          dd->ipath_sendctrl);
1152         return 0;
1153 }
1154
1155 EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);
1156
1157 int ipath_layer_enable_timer(struct ipath_devdata *dd)
1158 {
1159         /*
1160          * HT-400 has a design flaw where the chip and kernel idea
1161          * of the tail register don't always agree, and therefore we won't
1162          * get an interrupt on the next packet received.
1163          * If the board supports per packet receive interrupts, use it.
1164          * Otherwise, the timer function periodically checks for packets
1165          * to cover this case.
1166          * Either way, the timer is needed for verbs layer related
1167          * processing.
1168          */
1169         if (dd->ipath_flags & IPATH_GPIO_INTR) {
1170                 ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
1171                                  0x2074076542310ULL);
1172                 /* Enable GPIO bit 2 interrupt */
1173                 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
1174                                  (u64) (1 << 2));
1175         }
1176
1177         init_timer(&dd->verbs_layer.l_timer);
1178         dd->verbs_layer.l_timer.function = __ipath_verbs_timer;
1179         dd->verbs_layer.l_timer.data = (unsigned long)dd;
1180         dd->verbs_layer.l_timer.expires = jiffies + 1;
1181         add_timer(&dd->verbs_layer.l_timer);
1182
1183         return 0;
1184 }
1185
1186 EXPORT_SYMBOL_GPL(ipath_layer_enable_timer);
1187
1188 int ipath_layer_disable_timer(struct ipath_devdata *dd)
1189 {
1190         /* Disable GPIO bit 2 interrupt */
1191         if (dd->ipath_flags & IPATH_GPIO_INTR)
1192                 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);
1193
1194         del_timer_sync(&dd->verbs_layer.l_timer);
1195
1196         return 0;
1197 }
1198
1199 EXPORT_SYMBOL_GPL(ipath_layer_disable_timer);
1200
1201 /**
1202  * ipath_layer_set_verbs_flags - set the verbs layer flags
1203  * @dd: the infinipath device
1204  * @flags: the flags to set
1205  */
1206 int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags)
1207 {
1208         struct ipath_devdata *ss;
1209         unsigned long lflags;
1210
1211         spin_lock_irqsave(&ipath_devs_lock, lflags);
1212
1213         list_for_each_entry(ss, &ipath_dev_list, ipath_list) {
1214                 if (!(ss->ipath_flags & IPATH_INITTED))
1215                         continue;
1216                 if ((flags & IPATH_VERBS_KERNEL_SMA) &&
1217                     !(*ss->ipath_statusp & IPATH_STATUS_SMA))
1218                         *ss->ipath_statusp |= IPATH_STATUS_OIB_SMA;
1219                 else
1220                         *ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
1221         }
1222
1223         spin_unlock_irqrestore(&ipath_devs_lock, lflags);
1224
1225         return 0;
1226 }
1227
1228 EXPORT_SYMBOL_GPL(ipath_layer_set_verbs_flags);
1229
1230 /**
1231  * ipath_layer_get_npkeys - return the size of the PKEY table for port 0
1232  * @dd: the infinipath device
1233  */
1234 unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd)
1235 {
1236         return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
1237 }
1238
1239 EXPORT_SYMBOL_GPL(ipath_layer_get_npkeys);
1240
1241 /**
1242  * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table
1243  * @dd: the infinipath device
1244  * @index: the PKEY index
1245  */
1246 unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index)
1247 {
1248         unsigned ret;
1249
1250         if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
1251                 ret = 0;
1252         else
1253                 ret = dd->ipath_pd[0]->port_pkeys[index];
1254
1255         return ret;
1256 }
1257
1258 EXPORT_SYMBOL_GPL(ipath_layer_get_pkey);
1259
1260 /**
1261  * ipath_layer_get_pkeys - return the PKEY table for port 0
1262  * @dd: the infinipath device
1263  * @pkeys: the pkey table is placed here
1264  */
1265 int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
1266 {
1267         struct ipath_portdata *pd = dd->ipath_pd[0];
1268
1269         memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
1270
1271         return 0;
1272 }
1273
1274 EXPORT_SYMBOL_GPL(ipath_layer_get_pkeys);
1275
1276 /**
1277  * rm_pkey - decrement the reference count for the given PKEY
1278  * @dd: the infinipath device
1279  * @key: the PKEY index
1280  *
1281  * Return true if this was the last reference and the hardware table entry
1282  * needs to be changed.
1283  */
1284 static int rm_pkey(struct ipath_devdata *dd, u16 key)
1285 {
1286         int i;
1287         int ret;
1288
1289         for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
1290                 if (dd->ipath_pkeys[i] != key)
1291                         continue;
1292                 if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
1293                         dd->ipath_pkeys[i] = 0;
1294                         ret = 1;
1295                         goto bail;
1296                 }
1297                 break;
1298         }
1299
1300         ret = 0;
1301
1302 bail:
1303         return ret;
1304 }
1305
1306 /**
1307  * add_pkey - add the given PKEY to the hardware table
1308  * @dd: the infinipath device
1309  * @key: the PKEY
1310  *
1311  * Return an error code if unable to add the entry, zero if no change,
1312  * or 1 if the hardware PKEY register needs to be updated.
1313  */
1314 static int add_pkey(struct ipath_devdata *dd, u16 key)
1315 {
1316         int i;
1317         u16 lkey = key & 0x7FFF;
1318         int any = 0;
1319         int ret;
1320
1321         if (lkey == 0x7FFF) {
1322                 ret = 0;
1323                 goto bail;
1324         }
1325
1326         /* Look for an empty slot or a matching PKEY. */
1327         for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
1328                 if (!dd->ipath_pkeys[i]) {
1329                         any++;
1330                         continue;
1331                 }
1332                 /* If it matches exactly, try to increment the ref count */
1333                 if (dd->ipath_pkeys[i] == key) {
1334                         if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
1335                                 ret = 0;
1336                                 goto bail;
1337                         }
1338                         /* Lost the race. Look for an empty slot below. */
1339                         atomic_dec(&dd->ipath_pkeyrefs[i]);
1340                         any++;
1341                 }
1342                 /*
1343                  * It makes no sense to have both the limited and unlimited
1344                  * PKEY set at the same time since the unlimited one will
1345                  * disable the limited one.
1346                  */
1347                 if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
1348                         ret = -EEXIST;
1349                         goto bail;
1350                 }
1351         }
1352         if (!any) {
1353                 ret = -EBUSY;
1354                 goto bail;
1355         }
1356         for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
1357                 if (!dd->ipath_pkeys[i] &&
1358                     atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
1359                         /* for ipathstats, etc. */
1360                         ipath_stats.sps_pkeys[i] = lkey;
1361                         dd->ipath_pkeys[i] = key;
1362                         ret = 1;
1363                         goto bail;
1364                 }
1365         }
1366         ret = -EBUSY;
1367
1368 bail:
1369         return ret;
1370 }
1371
1372 /**
1373  * ipath_layer_set_pkeys - set the PKEY table for port 0
1374  * @dd: the infinipath device
1375  * @pkeys: the PKEY table
1376  */
1377 int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 * pkeys)
1378 {
1379         struct ipath_portdata *pd;
1380         int i;
1381         int changed = 0;
1382
1383         pd = dd->ipath_pd[0];
1384
1385         for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
1386                 u16 key = pkeys[i];
1387                 u16 okey = pd->port_pkeys[i];
1388
1389                 if (key == okey)
1390                         continue;
1391                 /*
1392                  * The value of this PKEY table entry is changing.
1393                  * Remove the old entry in the hardware's array of PKEYs.
1394                  */
1395                 if (okey & 0x7FFF)
1396                         changed |= rm_pkey(dd, okey);
1397                 if (key & 0x7FFF) {
1398                         int ret = add_pkey(dd, key);
1399
1400                         if (ret < 0)
1401                                 key = 0;
1402                         else
1403                                 changed |= ret;
1404                 }
1405                 pd->port_pkeys[i] = key;
1406         }
1407         if (changed) {
1408                 u64 pkey;
1409
1410                 pkey = (u64) dd->ipath_pkeys[0] |
1411                         ((u64) dd->ipath_pkeys[1] << 16) |
1412                         ((u64) dd->ipath_pkeys[2] << 32) |
1413                         ((u64) dd->ipath_pkeys[3] << 48);
1414                 ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
1415                            (unsigned long long) pkey);
1416                 ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
1417                                  pkey);
1418         }
1419         return 0;
1420 }
1421
1422 EXPORT_SYMBOL_GPL(ipath_layer_set_pkeys);
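/*
 * Sketch of what ipath_layer_set_pkeys() ends up programming (the values
 * are made up).  With the shared table holding
 *
 *	ipath_pkeys[] = { 0xffff, 0x8001, 0x0000, 0x0000 }
 *
 * the partition key register is packed with pkeys[0] in bits 0-15:
 *
 *	pkey = 0xffff | (0x8001ULL << 16) | (0ULL << 32) | (0ULL << 48)
 *	     = 0x000000008001ffff
 *
 * 0xffff is the default full-membership key and 0x8001 a full-membership
 * key for partition 1; the limited-membership variant of the same
 * partition would be 0x0001, and add_pkey() refuses to hold both at once
 * since the full key would mask the limited one.
 */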
1423
1424 /**
1425  * ipath_layer_get_linkdowndefaultstate - get the default linkdown state
1426  * @dd: the infinipath device
1427  *
1428  * Returns zero if the default is POLL, 1 if the default is SLEEP.
1429  */
1430 int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd)
1431 {
1432         return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
1433 }
1434
1435 EXPORT_SYMBOL_GPL(ipath_layer_get_linkdowndefaultstate);
1436
1437 /**
1438  * ipath_layer_set_linkdowndefaultstate - set the default linkdown state
1439  * @dd: the infinipath device
1440  * @sleep: the new state
1441  *
1442  * Note that this will only take effect when the link state changes.
1443  */
1444 int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
1445                                          int sleep)
1446 {
1447         if (sleep)
1448                 dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
1449         else
1450                 dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
1451         ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1452                          dd->ipath_ibcctrl);
1453         return 0;
1454 }
1455
1456 EXPORT_SYMBOL_GPL(ipath_layer_set_linkdowndefaultstate);
1457
1458 int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd)
1459 {
1460         return (dd->ipath_ibcctrl >>
1461                 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
1462                 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
1463 }
1464
1465 EXPORT_SYMBOL_GPL(ipath_layer_get_phyerrthreshold);
1466
1467 /**
1468  * ipath_layer_set_phyerrthreshold - set the physical error threshold
1469  * @dd: the infinipath device
1470  * @n: the new threshold
1471  *
1472  * Note that this will only take effect when the link state changes.
1473  */
1474 int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
1475 {
1476         unsigned v;
1477
1478         v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
1479                 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
1480         if (v != n) {
1481                 dd->ipath_ibcctrl &=
1482                         ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
1483                           INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
1484                 dd->ipath_ibcctrl |=
1485                         (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
1486                 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1487                                  dd->ipath_ibcctrl);
1488         }
1489         return 0;
1490 }
1491
1492 EXPORT_SYMBOL_GPL(ipath_layer_set_phyerrthreshold);
1493
1494 int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd)
1495 {
1496         return (dd->ipath_ibcctrl >>
1497                 INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
1498                 INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
1499 }
1500
1501 EXPORT_SYMBOL_GPL(ipath_layer_get_overrunthreshold);
1502
1503 /**
1504  * ipath_layer_set_overrunthreshold - set the overrun threshold
1505  * @dd: the infinipath device
1506  * @n: the new threshold
1507  *
1508  * Note that this will only take effect when the link state changes.
1509  */
1510 int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
1511 {
1512         unsigned v;
1513
1514         v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
1515                 INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
1516         if (v != n) {
1517                 dd->ipath_ibcctrl &=
1518                         ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
1519                           INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
1520                 dd->ipath_ibcctrl |=
1521                         (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
1522                 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1523                                  dd->ipath_ibcctrl);
1524         }
1525         return 0;
1526 }
1527
1528 EXPORT_SYMBOL_GPL(ipath_layer_set_overrunthreshold);
1529
1530 int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
1531                               size_t namelen)
1532 {
1533         return dd->ipath_f_get_boardname(dd, name, namelen);
1534 }
1535 EXPORT_SYMBOL_GPL(ipath_layer_get_boardname);
1536
1537 u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd)
1538 {
1539         return dd->ipath_rcvhdrentsize;
1540 }
1541 EXPORT_SYMBOL_GPL(ipath_layer_get_rcvhdrentsize);