/*
 * ibm_emac_mal.c
 *
 * Armin Kuster akuster@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>

#include "ibm_emac_mal.h"
29 // Locking: Should we share a lock with the client ? The client could provide
30 // a lock pointer (optionally) in the commac structure... I don't think this is
31 // really necessary though
33 /* This lock protects the commac list. On today UP implementations, it's
34 * really only used as IRQ protection in mal_{register,unregister}_commac()
36 static rwlock_t mal_list_lock = RW_LOCK_UNLOCKED;
38 int mal_register_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
42 write_lock_irqsave(&mal_list_lock, flags);
44 /* Don't let multiple commacs claim the same channel */
45 if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
46 (mal->rx_chan_mask & commac->rx_chan_mask)) {
47 write_unlock_irqrestore(&mal_list_lock, flags);
51 mal->tx_chan_mask |= commac->tx_chan_mask;
52 mal->rx_chan_mask |= commac->rx_chan_mask;
54 list_add(&commac->list, &mal->commac);
56 write_unlock_irqrestore(&mal_list_lock, flags);
63 int mal_unregister_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
67 write_lock_irqsave(&mal_list_lock, flags);
69 mal->tx_chan_mask &= ~commac->tx_chan_mask;
70 mal->rx_chan_mask &= ~commac->rx_chan_mask;
72 list_del_init(&commac->list);
74 write_unlock_irqrestore(&mal_list_lock, flags);
81 int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size)
85 set_mal_dcrn(mal, DCRN_MALRCBS0, size);
89 set_mal_dcrn(mal, DCRN_MALRCBS1, size);
94 set_mal_dcrn(mal, DCRN_MALRCBS2, size);
99 set_mal_dcrn(mal, DCRN_MALRCBS3, size);
109 static irqreturn_t mal_serr(int irq, void *dev_instance, struct pt_regs *regs)
111 struct ibm_ocp_mal *mal = dev_instance;
112 unsigned long mal_error;
115 * This SERR applies to one of the devices on the MAL, here we charge
116 * it against the first EMAC registered for the MAL.
119 mal_error = get_mal_dcrn(mal, DCRN_MALESR);
121 printk(KERN_ERR "%s: System Error (MALESR=%lx)\n",
122 "MAL" /* FIXME: get the name right */ , mal_error);
124 /* FIXME: decipher error */
125 /* DIXME: distribute to commacs, if possible */
127 /* Clear the error status register */
128 set_mal_dcrn(mal, DCRN_MALESR, mal_error);
133 static irqreturn_t mal_txeob(int irq, void *dev_instance, struct pt_regs *regs)
135 struct ibm_ocp_mal *mal = dev_instance;
139 isr = get_mal_dcrn(mal, DCRN_MALTXEOBISR);
140 set_mal_dcrn(mal, DCRN_MALTXEOBISR, isr);
142 read_lock(&mal_list_lock);
143 list_for_each(l, &mal->commac) {
144 struct mal_commac *mc = list_entry(l, struct mal_commac, list);
146 if (isr & mc->tx_chan_mask) {
147 mc->ops->txeob(mc->dev, isr & mc->tx_chan_mask);
150 read_unlock(&mal_list_lock);
155 static irqreturn_t mal_rxeob(int irq, void *dev_instance, struct pt_regs *regs)
157 struct ibm_ocp_mal *mal = dev_instance;
161 isr = get_mal_dcrn(mal, DCRN_MALRXEOBISR);
162 set_mal_dcrn(mal, DCRN_MALRXEOBISR, isr);
164 read_lock(&mal_list_lock);
165 list_for_each(l, &mal->commac) {
166 struct mal_commac *mc = list_entry(l, struct mal_commac, list);
168 if (isr & mc->rx_chan_mask) {
169 mc->ops->rxeob(mc->dev, isr & mc->rx_chan_mask);
172 read_unlock(&mal_list_lock);
177 static irqreturn_t mal_txde(int irq, void *dev_instance, struct pt_regs *regs)
179 struct ibm_ocp_mal *mal = dev_instance;
183 deir = get_mal_dcrn(mal, DCRN_MALTXDEIR);
185 /* FIXME: print which MAL correctly */
186 printk(KERN_WARNING "%s: Tx descriptor error (MALTXDEIR=%lx)\n",
189 read_lock(&mal_list_lock);
190 list_for_each(l, &mal->commac) {
191 struct mal_commac *mc = list_entry(l, struct mal_commac, list);
193 if (deir & mc->tx_chan_mask) {
194 mc->ops->txde(mc->dev, deir & mc->tx_chan_mask);
197 read_unlock(&mal_list_lock);
203 * This interrupt should be very rare at best. This occurs when
204 * the hardware has a problem with the receive descriptors. The manual
205 * states that it occurs when the hardware cannot the receive descriptor
206 * empty bit is not set. The recovery mechanism will be to
207 * traverse through the descriptors, handle any that are marked to be
208 * handled and reinitialize each along the way. At that point the driver
211 static irqreturn_t mal_rxde(int irq, void *dev_instance, struct pt_regs *regs)
213 struct ibm_ocp_mal *mal = dev_instance;
217 deir = get_mal_dcrn(mal, DCRN_MALRXDEIR);
220 * This really is needed. This case encountered in stress testing.
225 /* FIXME: print which MAL correctly */
226 printk(KERN_WARNING "%s: Rx descriptor error (MALRXDEIR=%lx)\n",
229 read_lock(&mal_list_lock);
230 list_for_each(l, &mal->commac) {
231 struct mal_commac *mc = list_entry(l, struct mal_commac, list);
233 if (deir & mc->rx_chan_mask) {
234 mc->ops->rxde(mc->dev, deir & mc->rx_chan_mask);
237 read_unlock(&mal_list_lock);
242 static int __init mal_probe(struct ocp_device *ocpdev)
244 struct ibm_ocp_mal *mal = NULL;
245 struct ocp_func_mal_data *maldata;
248 maldata = (struct ocp_func_mal_data *)ocpdev->def->additions;
249 if (maldata == NULL) {
250 printk(KERN_ERR "mal%d: Missing additional datas !\n",
255 mal = kmalloc(sizeof(struct ibm_ocp_mal), GFP_KERNEL);
258 "mal%d: Out of memory allocating MAL structure !\n",
262 memset(mal, 0, sizeof(*mal));
264 switch (ocpdev->def->index) {
266 mal->dcrbase = DCRN_MAL_BASE;
268 #ifdef DCRN_MAL1_BASE
270 mal->dcrbase = DCRN_MAL1_BASE;
277 /**************************/
279 INIT_LIST_HEAD(&mal->commac);
281 set_mal_dcrn(mal, DCRN_MALRXCARR, 0xFFFFFFFF);
282 set_mal_dcrn(mal, DCRN_MALTXCARR, 0xFFFFFFFF);
284 set_mal_dcrn(mal, DCRN_MALCR, MALCR_MMSR); /* 384 */
285 /* FIXME: Add delay */
287 /* Set the MAL configuration register */
288 set_mal_dcrn(mal, DCRN_MALCR,
289 MALCR_PLBB | MALCR_OPBBL | MALCR_LEA |
290 MALCR_PLBLT_DEFAULT);
292 /* It would be nice to allocate buffers separately for each
293 * channel, but we can't because the channels share the upper
294 * 13 bits of address lines. Each channels buffer must also
295 * be 4k aligned, so we allocate 4k for each channel. This is
296 * inefficient FIXME: do better, if possible */
297 mal->tx_virt_addr = dma_alloc_coherent(&ocpdev->dev,
299 maldata->num_tx_chans,
300 &mal->tx_phys_addr, GFP_KERNEL);
301 if (mal->tx_virt_addr == NULL) {
303 "mal%d: Out of memory allocating MAL descriptors !\n",
309 /* God, oh, god, I hate DCRs */
310 set_mal_dcrn(mal, DCRN_MALTXCTP0R, mal->tx_phys_addr);
311 #ifdef DCRN_MALTXCTP1R
312 if (maldata->num_tx_chans > 1)
313 set_mal_dcrn(mal, DCRN_MALTXCTP1R,
314 mal->tx_phys_addr + MAL_DT_ALIGN);
315 #endif /* DCRN_MALTXCTP1R */
316 #ifdef DCRN_MALTXCTP2R
317 if (maldata->num_tx_chans > 2)
318 set_mal_dcrn(mal, DCRN_MALTXCTP2R,
319 mal->tx_phys_addr + 2 * MAL_DT_ALIGN);
320 #endif /* DCRN_MALTXCTP2R */
321 #ifdef DCRN_MALTXCTP3R
322 if (maldata->num_tx_chans > 3)
323 set_mal_dcrn(mal, DCRN_MALTXCTP3R,
324 mal->tx_phys_addr + 3 * MAL_DT_ALIGN);
325 #endif /* DCRN_MALTXCTP3R */
326 #ifdef DCRN_MALTXCTP4R
327 if (maldata->num_tx_chans > 4)
328 set_mal_dcrn(mal, DCRN_MALTXCTP4R,
329 mal->tx_phys_addr + 4 * MAL_DT_ALIGN);
330 #endif /* DCRN_MALTXCTP4R */
331 #ifdef DCRN_MALTXCTP5R
332 if (maldata->num_tx_chans > 5)
333 set_mal_dcrn(mal, DCRN_MALTXCTP5R,
334 mal->tx_phys_addr + 5 * MAL_DT_ALIGN);
335 #endif /* DCRN_MALTXCTP5R */
336 #ifdef DCRN_MALTXCTP6R
337 if (maldata->num_tx_chans > 6)
338 set_mal_dcrn(mal, DCRN_MALTXCTP6R,
339 mal->tx_phys_addr + 6 * MAL_DT_ALIGN);
340 #endif /* DCRN_MALTXCTP6R */
341 #ifdef DCRN_MALTXCTP7R
342 if (maldata->num_tx_chans > 7)
343 set_mal_dcrn(mal, DCRN_MALTXCTP7R,
344 mal->tx_phys_addr + 7 * MAL_DT_ALIGN);
345 #endif /* DCRN_MALTXCTP7R */
347 mal->rx_virt_addr = dma_alloc_coherent(&ocpdev->dev,
349 maldata->num_rx_chans,
350 &mal->rx_phys_addr, GFP_KERNEL);
352 set_mal_dcrn(mal, DCRN_MALRXCTP0R, mal->rx_phys_addr);
353 #ifdef DCRN_MALRXCTP1R
354 if (maldata->num_rx_chans > 1)
355 set_mal_dcrn(mal, DCRN_MALRXCTP1R,
356 mal->rx_phys_addr + MAL_DT_ALIGN);
357 #endif /* DCRN_MALRXCTP1R */
358 #ifdef DCRN_MALRXCTP2R
359 if (maldata->num_rx_chans > 2)
360 set_mal_dcrn(mal, DCRN_MALRXCTP2R,
361 mal->rx_phys_addr + 2 * MAL_DT_ALIGN);
362 #endif /* DCRN_MALRXCTP2R */
363 #ifdef DCRN_MALRXCTP3R
364 if (maldata->num_rx_chans > 3)
365 set_mal_dcrn(mal, DCRN_MALRXCTP3R,
366 mal->rx_phys_addr + 3 * MAL_DT_ALIGN);
367 #endif /* DCRN_MALRXCTP3R */
369 err = request_irq(maldata->serr_irq, mal_serr, 0, "MAL SERR", mal);
372 err = request_irq(maldata->txde_irq, mal_txde, 0, "MAL TX DE ", mal);
375 err = request_irq(maldata->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
378 err = request_irq(maldata->rxde_irq, mal_rxde, 0, "MAL RX DE", mal);
381 err = request_irq(maldata->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
385 set_mal_dcrn(mal, DCRN_MALIER,
386 MALIER_DE | MALIER_NE | MALIER_TE |
387 MALIER_OPBE | MALIER_PLBE);
389 /* Advertise me to the rest of the world */
390 ocp_set_drvdata(ocpdev, mal);
392 printk(KERN_INFO "mal%d: Initialized, %d tx channels, %d rx channels\n",
393 ocpdev->def->index, maldata->num_tx_chans,
394 maldata->num_rx_chans);
399 /* FIXME: dispose requested IRQs ! */
405 static void __exit mal_remove(struct ocp_device *ocpdev)
407 struct ibm_ocp_mal *mal = ocp_get_drvdata(ocpdev);
408 struct ocp_func_mal_data *maldata = ocpdev->def->additions;
412 ocp_set_drvdata(ocpdev, NULL);
414 /* FIXME: shut down the MAL, deal with dependency with emac */
415 free_irq(maldata->serr_irq, mal);
416 free_irq(maldata->txde_irq, mal);
417 free_irq(maldata->txeob_irq, mal);
418 free_irq(maldata->rxde_irq, mal);
419 free_irq(maldata->rxeob_irq, mal);
421 if (mal->tx_virt_addr)
422 dma_free_coherent(&ocpdev->dev,
423 MAL_DT_ALIGN * maldata->num_tx_chans,
424 mal->tx_virt_addr, mal->tx_phys_addr);
426 if (mal->rx_virt_addr)
427 dma_free_coherent(&ocpdev->dev,
428 MAL_DT_ALIGN * maldata->num_rx_chans,
429 mal->rx_virt_addr, mal->rx_phys_addr);
434 /* Structure for a device driver */
435 static struct ocp_device_id mal_ids[] = {
436 {.vendor = OCP_ANY_ID,.function = OCP_FUNC_MAL},
437 {.vendor = OCP_VENDOR_INVALID}
440 static struct ocp_driver mal_driver = {
445 .remove = mal_remove,
448 static int __init init_mals(void)
452 rc = ocp_register_driver(&mal_driver);
454 ocp_unregister_driver(&mal_driver);
461 static void __exit exit_mals(void)
463 ocp_unregister_driver(&mal_driver);
/* Module entry/exit points */
module_init(init_mals);
module_exit(exit_mals);