2 * sata_nv.c - NVIDIA nForce SATA
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion as with other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
32 * CK804/MCP04 controllers support an alternate programming interface
33 * similar to the ADMA specification (with some modifications).
34 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35 * sent through the legacy interface.
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/pci.h>
42 #include <linux/init.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/interrupt.h>
46 #include <linux/device.h>
47 #include <scsi/scsi_host.h>
48 #include <scsi/scsi_device.h>
49 #include <linux/libata.h>
51 #define DRV_NAME "sata_nv"
52 #define DRV_VERSION "3.2"
54 #define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
61 NV_PORT0_SCR_REG_OFFSET = 0x00,
62 NV_PORT1_SCR_REG_OFFSET = 0x40,
64 /* INT_STATUS/ENABLE */
67 NV_INT_STATUS_CK804 = 0x440,
68 NV_INT_ENABLE_CK804 = 0x441,
70 /* INT_STATUS/ENABLE bits */
74 NV_INT_REMOVED = 0x08,
76 NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
79 NV_INT_MASK = NV_INT_DEV |
80 NV_INT_ADDED | NV_INT_REMOVED,
84 NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
86 // For PCI config register 20
87 NV_MCP_SATA_CFG_20 = 0x50,
88 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
89 NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
90 NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
91 NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
92 NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
94 NV_ADMA_MAX_CPBS = 32,
97 NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
99 NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
100 NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
101 NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
102 (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
104 /* BAR5 offset to ADMA general registers */
106 NV_ADMA_GEN_CTL = 0x00,
107 NV_ADMA_NOTIFIER_CLEAR = 0x30,
109 /* BAR5 offset to ADMA ports */
110 NV_ADMA_PORT = 0x480,
112 /* size of ADMA port register space */
113 NV_ADMA_PORT_SIZE = 0x100,
115 /* ADMA port registers */
117 NV_ADMA_CPB_COUNT = 0x42,
118 NV_ADMA_NEXT_CPB_IDX = 0x43,
120 NV_ADMA_CPB_BASE_LOW = 0x48,
121 NV_ADMA_CPB_BASE_HIGH = 0x4C,
122 NV_ADMA_APPEND = 0x50,
123 NV_ADMA_NOTIFIER = 0x68,
124 NV_ADMA_NOTIFIER_ERROR = 0x6C,
126 /* NV_ADMA_CTL register bits */
127 NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
128 NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
129 NV_ADMA_CTL_GO = (1 << 7),
130 NV_ADMA_CTL_AIEN = (1 << 8),
131 NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
132 NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),
134 /* CPB response flag bits */
135 NV_CPB_RESP_DONE = (1 << 0),
136 NV_CPB_RESP_ATA_ERR = (1 << 3),
137 NV_CPB_RESP_CMD_ERR = (1 << 4),
138 NV_CPB_RESP_CPB_ERR = (1 << 7),
140 /* CPB control flag bits */
141 NV_CPB_CTL_CPB_VALID = (1 << 0),
142 NV_CPB_CTL_QUEUE = (1 << 1),
143 NV_CPB_CTL_APRD_VALID = (1 << 2),
144 NV_CPB_CTL_IEN = (1 << 3),
145 NV_CPB_CTL_FPDMA = (1 << 4),
148 NV_APRD_WRITE = (1 << 1),
149 NV_APRD_END = (1 << 2),
150 NV_APRD_CONT = (1 << 3),
152 /* NV_ADMA_STAT flags */
153 NV_ADMA_STAT_TIMEOUT = (1 << 0),
154 NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
155 NV_ADMA_STAT_HOTPLUG = (1 << 2),
156 NV_ADMA_STAT_CPBERR = (1 << 4),
157 NV_ADMA_STAT_SERROR = (1 << 5),
158 NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
159 NV_ADMA_STAT_IDLE = (1 << 8),
160 NV_ADMA_STAT_LEGACY = (1 << 9),
161 NV_ADMA_STAT_STOPPED = (1 << 10),
162 NV_ADMA_STAT_DONE = (1 << 12),
163 NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
164 NV_ADMA_STAT_TIMEOUT,
167 NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
168 NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
172 /* ADMA Physical Region Descriptor - one SG segment */
181 enum nv_adma_regbits {
182 CMDEND = (1 << 15), /* end of command list */
183 WNB = (1 << 14), /* wait-not-BSY */
184 IGN = (1 << 13), /* ignore this entry */
185 CS1n = (1 << (4 + 8)), /* std. PATA signals follow... */
186 DA2 = (1 << (2 + 8)),
187 DA1 = (1 << (1 + 8)),
188 DA0 = (1 << (0 + 8)),
191 /* ADMA Command Parameter Block
192 The first 5 SG segments are stored inside the Command Parameter Block itself.
193 If there are more than 5 segments the remainder are stored in a separate
194 memory area indicated by next_aprd. */
196 u8 resp_flags; /* 0 */
197 u8 reserved1; /* 1 */
198 u8 ctl_flags; /* 2 */
199 /* len is length of taskfile in 64 bit words */
202 u8 next_cpb_idx; /* 5 */
203 __le16 reserved2; /* 6-7 */
204 __le16 tf[12]; /* 8-31 */
205 struct nv_adma_prd aprd[5]; /* 32-111 */
206 __le64 next_aprd; /* 112-119 */
207 __le64 reserved3; /* 120-127 */
211 struct nv_adma_port_priv {
212 struct nv_adma_cpb *cpb;
214 struct nv_adma_prd *aprd;
220 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))
222 static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
223 static void nv_ck804_host_stop(struct ata_host *host);
224 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
225 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
226 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
227 static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
228 static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
230 static void nv_nf2_freeze(struct ata_port *ap);
231 static void nv_nf2_thaw(struct ata_port *ap);
232 static void nv_ck804_freeze(struct ata_port *ap);
233 static void nv_ck804_thaw(struct ata_port *ap);
234 static void nv_error_handler(struct ata_port *ap);
235 static int nv_adma_slave_config(struct scsi_device *sdev);
236 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
237 static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
238 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
239 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
240 static void nv_adma_irq_clear(struct ata_port *ap);
241 static int nv_adma_port_start(struct ata_port *ap);
242 static void nv_adma_port_stop(struct ata_port *ap);
243 static void nv_adma_error_handler(struct ata_port *ap);
244 static void nv_adma_host_stop(struct ata_host *host);
245 static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
246 static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
247 static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
248 static u8 nv_adma_bmdma_status(struct ata_port *ap);
254 NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
259 static const struct pci_device_id nv_pci_tbl[] = {
260 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
261 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
262 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
263 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
264 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
265 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
266 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
267 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
268 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
269 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
270 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
271 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
272 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
273 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
274 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
275 PCI_ANY_ID, PCI_ANY_ID,
276 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
277 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
278 PCI_ANY_ID, PCI_ANY_ID,
279 PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
281 { } /* terminate list */
284 static struct pci_driver nv_pci_driver = {
286 .id_table = nv_pci_tbl,
287 .probe = nv_init_one,
288 .remove = ata_pci_remove_one,
291 static struct scsi_host_template nv_sht = {
292 .module = THIS_MODULE,
294 .ioctl = ata_scsi_ioctl,
295 .queuecommand = ata_scsi_queuecmd,
296 .can_queue = ATA_DEF_QUEUE,
297 .this_id = ATA_SHT_THIS_ID,
298 .sg_tablesize = LIBATA_MAX_PRD,
299 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
300 .emulated = ATA_SHT_EMULATED,
301 .use_clustering = ATA_SHT_USE_CLUSTERING,
302 .proc_name = DRV_NAME,
303 .dma_boundary = ATA_DMA_BOUNDARY,
304 .slave_configure = ata_scsi_slave_config,
305 .slave_destroy = ata_scsi_slave_destroy,
306 .bios_param = ata_std_bios_param,
309 static struct scsi_host_template nv_adma_sht = {
310 .module = THIS_MODULE,
312 .ioctl = ata_scsi_ioctl,
313 .queuecommand = ata_scsi_queuecmd,
314 .can_queue = NV_ADMA_MAX_CPBS,
315 .this_id = ATA_SHT_THIS_ID,
316 .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
317 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
318 .emulated = ATA_SHT_EMULATED,
319 .use_clustering = ATA_SHT_USE_CLUSTERING,
320 .proc_name = DRV_NAME,
321 .dma_boundary = NV_ADMA_DMA_BOUNDARY,
322 .slave_configure = nv_adma_slave_config,
323 .slave_destroy = ata_scsi_slave_destroy,
324 .bios_param = ata_std_bios_param,
327 static const struct ata_port_operations nv_generic_ops = {
328 .port_disable = ata_port_disable,
329 .tf_load = ata_tf_load,
330 .tf_read = ata_tf_read,
331 .exec_command = ata_exec_command,
332 .check_status = ata_check_status,
333 .dev_select = ata_std_dev_select,
334 .bmdma_setup = ata_bmdma_setup,
335 .bmdma_start = ata_bmdma_start,
336 .bmdma_stop = ata_bmdma_stop,
337 .bmdma_status = ata_bmdma_status,
338 .qc_prep = ata_qc_prep,
339 .qc_issue = ata_qc_issue_prot,
340 .freeze = ata_bmdma_freeze,
341 .thaw = ata_bmdma_thaw,
342 .error_handler = nv_error_handler,
343 .post_internal_cmd = ata_bmdma_post_internal_cmd,
344 .data_xfer = ata_pio_data_xfer,
345 .irq_handler = nv_generic_interrupt,
346 .irq_clear = ata_bmdma_irq_clear,
347 .scr_read = nv_scr_read,
348 .scr_write = nv_scr_write,
349 .port_start = ata_port_start,
350 .port_stop = ata_port_stop,
351 .host_stop = ata_pci_host_stop,
354 static const struct ata_port_operations nv_nf2_ops = {
355 .port_disable = ata_port_disable,
356 .tf_load = ata_tf_load,
357 .tf_read = ata_tf_read,
358 .exec_command = ata_exec_command,
359 .check_status = ata_check_status,
360 .dev_select = ata_std_dev_select,
361 .bmdma_setup = ata_bmdma_setup,
362 .bmdma_start = ata_bmdma_start,
363 .bmdma_stop = ata_bmdma_stop,
364 .bmdma_status = ata_bmdma_status,
365 .qc_prep = ata_qc_prep,
366 .qc_issue = ata_qc_issue_prot,
367 .freeze = nv_nf2_freeze,
369 .error_handler = nv_error_handler,
370 .post_internal_cmd = ata_bmdma_post_internal_cmd,
371 .data_xfer = ata_pio_data_xfer,
372 .irq_handler = nv_nf2_interrupt,
373 .irq_clear = ata_bmdma_irq_clear,
374 .scr_read = nv_scr_read,
375 .scr_write = nv_scr_write,
376 .port_start = ata_port_start,
377 .port_stop = ata_port_stop,
378 .host_stop = ata_pci_host_stop,
381 static const struct ata_port_operations nv_ck804_ops = {
382 .port_disable = ata_port_disable,
383 .tf_load = ata_tf_load,
384 .tf_read = ata_tf_read,
385 .exec_command = ata_exec_command,
386 .check_status = ata_check_status,
387 .dev_select = ata_std_dev_select,
388 .bmdma_setup = ata_bmdma_setup,
389 .bmdma_start = ata_bmdma_start,
390 .bmdma_stop = ata_bmdma_stop,
391 .bmdma_status = ata_bmdma_status,
392 .qc_prep = ata_qc_prep,
393 .qc_issue = ata_qc_issue_prot,
394 .freeze = nv_ck804_freeze,
395 .thaw = nv_ck804_thaw,
396 .error_handler = nv_error_handler,
397 .post_internal_cmd = ata_bmdma_post_internal_cmd,
398 .data_xfer = ata_pio_data_xfer,
399 .irq_handler = nv_ck804_interrupt,
400 .irq_clear = ata_bmdma_irq_clear,
401 .scr_read = nv_scr_read,
402 .scr_write = nv_scr_write,
403 .port_start = ata_port_start,
404 .port_stop = ata_port_stop,
405 .host_stop = nv_ck804_host_stop,
408 static const struct ata_port_operations nv_adma_ops = {
409 .port_disable = ata_port_disable,
410 .tf_load = ata_tf_load,
411 .tf_read = ata_tf_read,
412 .check_atapi_dma = nv_adma_check_atapi_dma,
413 .exec_command = ata_exec_command,
414 .check_status = ata_check_status,
415 .dev_select = ata_std_dev_select,
416 .bmdma_setup = nv_adma_bmdma_setup,
417 .bmdma_start = nv_adma_bmdma_start,
418 .bmdma_stop = nv_adma_bmdma_stop,
419 .bmdma_status = nv_adma_bmdma_status,
420 .qc_prep = nv_adma_qc_prep,
421 .qc_issue = nv_adma_qc_issue,
422 .freeze = nv_ck804_freeze,
423 .thaw = nv_ck804_thaw,
424 .error_handler = nv_adma_error_handler,
425 .post_internal_cmd = nv_adma_bmdma_stop,
426 .data_xfer = ata_mmio_data_xfer,
427 .irq_handler = nv_adma_interrupt,
428 .irq_clear = nv_adma_irq_clear,
429 .scr_read = nv_scr_read,
430 .scr_write = nv_scr_write,
431 .port_start = nv_adma_port_start,
432 .port_stop = nv_adma_port_stop,
433 .host_stop = nv_adma_host_stop,
436 static struct ata_port_info nv_port_info[] = {
440 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
441 ATA_FLAG_HRST_TO_RESUME,
442 .pio_mask = NV_PIO_MASK,
443 .mwdma_mask = NV_MWDMA_MASK,
444 .udma_mask = NV_UDMA_MASK,
445 .port_ops = &nv_generic_ops,
450 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
451 ATA_FLAG_HRST_TO_RESUME,
452 .pio_mask = NV_PIO_MASK,
453 .mwdma_mask = NV_MWDMA_MASK,
454 .udma_mask = NV_UDMA_MASK,
455 .port_ops = &nv_nf2_ops,
460 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
461 ATA_FLAG_HRST_TO_RESUME,
462 .pio_mask = NV_PIO_MASK,
463 .mwdma_mask = NV_MWDMA_MASK,
464 .udma_mask = NV_UDMA_MASK,
465 .port_ops = &nv_ck804_ops,
470 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
471 ATA_FLAG_MMIO | ATA_FLAG_NCQ,
472 .pio_mask = NV_PIO_MASK,
473 .mwdma_mask = NV_MWDMA_MASK,
474 .udma_mask = NV_UDMA_MASK,
475 .port_ops = &nv_adma_ops,
479 MODULE_AUTHOR("NVIDIA");
480 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
481 MODULE_LICENSE("GPL");
482 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
483 MODULE_VERSION(DRV_VERSION);
485 static int adma_enabled = 1;
487 static inline void __iomem *__nv_adma_ctl_block(void __iomem *mmio,
488 unsigned int port_no)
490 mmio += NV_ADMA_PORT + port_no * NV_ADMA_PORT_SIZE;
494 static inline void __iomem *nv_adma_ctl_block(struct ata_port *ap)
496 return __nv_adma_ctl_block(ap->host->mmio_base, ap->port_no);
499 static inline void __iomem *nv_adma_gen_block(struct ata_port *ap)
501 return (ap->host->mmio_base + NV_ADMA_GEN);
504 static inline void __iomem *nv_adma_notifier_clear_block(struct ata_port *ap)
506 return (nv_adma_gen_block(ap) + NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no));
509 static void nv_adma_register_mode(struct ata_port *ap)
511 void __iomem *mmio = nv_adma_ctl_block(ap);
512 struct nv_adma_port_priv *pp = ap->private_data;
515 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
518 tmp = readw(mmio + NV_ADMA_CTL);
519 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
521 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
524 static void nv_adma_mode(struct ata_port *ap)
526 void __iomem *mmio = nv_adma_ctl_block(ap);
527 struct nv_adma_port_priv *pp = ap->private_data;
530 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
533 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
535 tmp = readw(mmio + NV_ADMA_CTL);
536 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
538 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
541 static int nv_adma_slave_config(struct scsi_device *sdev)
543 struct ata_port *ap = ata_shost_to_port(sdev->host);
544 struct nv_adma_port_priv *pp = ap->private_data;
545 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
547 unsigned long segment_boundary;
548 unsigned short sg_tablesize;
551 u32 current_reg, new_reg, config_mask;
553 rc = ata_scsi_slave_config(sdev);
555 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
556 /* Not a proper libata device, ignore */
559 if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
561 * NVIDIA reports that ADMA mode does not support ATAPI commands.
562 * Therefore ATAPI commands are sent through the legacy interface.
563 * However, the legacy interface only supports 32-bit DMA.
564 * Restrict DMA parameters as required by the legacy interface
565 * when an ATAPI device is connected.
567 bounce_limit = ATA_DMA_MASK;
568 segment_boundary = ATA_DMA_BOUNDARY;
569 /* Subtract 1 since an extra entry may be needed for padding, see
571 sg_tablesize = LIBATA_MAX_PRD - 1;
573 /* Since the legacy DMA engine is in use, we need to disable ADMA
576 nv_adma_register_mode(ap);
579 bounce_limit = *ap->dev->dma_mask;
580 segment_boundary = NV_ADMA_DMA_BOUNDARY;
581 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
585 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, ¤t_reg);
588 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
589 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
591 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
592 NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
595 new_reg = current_reg | config_mask;
596 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
599 new_reg = current_reg & ~config_mask;
600 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
603 if(current_reg != new_reg)
604 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
606 blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
607 blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
608 blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
609 ata_port_printk(ap, KERN_INFO,
610 "bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
611 (unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
615 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
617 struct nv_adma_port_priv *pp = qc->ap->private_data;
618 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
621 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
623 unsigned int idx = 0;
625 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);
627 if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
628 cpb[idx++] = cpu_to_le16(IGN);
629 cpb[idx++] = cpu_to_le16(IGN);
630 cpb[idx++] = cpu_to_le16(IGN);
631 cpb[idx++] = cpu_to_le16(IGN);
632 cpb[idx++] = cpu_to_le16(IGN);
635 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature);
636 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
637 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
638 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
639 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
641 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
642 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
643 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
644 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
645 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
647 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
652 static void nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
654 struct nv_adma_port_priv *pp = ap->private_data;
655 int complete = 0, have_err = 0;
656 u8 flags = pp->cpb[cpb_num].resp_flags;
658 VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
660 if (flags & NV_CPB_RESP_DONE) {
661 VPRINTK("CPB flags done, flags=0x%x\n", flags);
664 if (flags & NV_CPB_RESP_ATA_ERR) {
665 ata_port_printk(ap, KERN_ERR, "CPB flags ATA err, flags=0x%x\n", flags);
669 if (flags & NV_CPB_RESP_CMD_ERR) {
670 ata_port_printk(ap, KERN_ERR, "CPB flags CMD err, flags=0x%x\n", flags);
674 if (flags & NV_CPB_RESP_CPB_ERR) {
675 ata_port_printk(ap, KERN_ERR, "CPB flags CPB err, flags=0x%x\n", flags);
679 if(complete || force_err)
681 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
684 /* Only use the ATA port status for non-NCQ commands.
685 For NCQ commands the current status may have nothing to do with
686 the command just completed. */
687 if(qc->tf.protocol != ATA_PROT_NCQ)
688 ata_status = readb(nv_adma_ctl_block(ap) + (ATA_REG_STATUS * 4));
690 if(have_err || force_err)
691 ata_status |= ATA_ERR;
693 qc->err_mask |= ac_err_mask(ata_status);
694 DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num,
701 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
703 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
705 /* freeze if hotplugged */
706 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
711 /* bail out if not our interrupt */
712 if (!(irq_stat & NV_INT_DEV))
715 /* DEV interrupt w/ no active qc? */
716 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
717 ata_check_status(ap);
721 /* handle interrupt */
722 return ata_host_intr(ap, qc);
725 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
727 struct ata_host *host = dev_instance;
729 u32 notifier_clears[2];
731 spin_lock(&host->lock);
733 for (i = 0; i < host->n_ports; i++) {
734 struct ata_port *ap = host->ports[i];
735 notifier_clears[i] = 0;
737 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
738 struct nv_adma_port_priv *pp = ap->private_data;
739 void __iomem *mmio = nv_adma_ctl_block(ap);
742 int have_global_err = 0;
743 u32 notifier, notifier_error;
745 /* if in ATA register mode, use standard ata interrupt handler */
746 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
747 u8 irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804)
748 >> (NV_INT_PORT_SHIFT * i);
749 if(ata_tag_valid(ap->active_tag))
750 /** NV_INT_DEV indication seems unreliable at times
751 at least in ADMA mode. Force it on always when a
752 command is active, to prevent losing interrupts. */
753 irq_stat |= NV_INT_DEV;
754 handled += nv_host_intr(ap, irq_stat);
758 notifier = readl(mmio + NV_ADMA_NOTIFIER);
759 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
760 notifier_clears[i] = notifier | notifier_error;
762 gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
764 if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
769 status = readw(mmio + NV_ADMA_STAT);
771 /* Clear status. Ensure the controller sees the clearing before we start
772 looking at any of the CPB statuses, so that any CPB completions after
773 this point in the handler will raise another interrupt. */
774 writew(status, mmio + NV_ADMA_STAT);
775 readw(mmio + NV_ADMA_STAT); /* flush posted write */
778 /* freeze if hotplugged */
779 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | NV_ADMA_STAT_HOTUNPLUG))) {
780 ata_port_printk(ap, KERN_NOTICE, "Hotplug event, freezing\n");
786 if (status & NV_ADMA_STAT_TIMEOUT) {
787 ata_port_printk(ap, KERN_ERR, "timeout, stat=0x%x\n", status);
790 if (status & NV_ADMA_STAT_CPBERR) {
791 ata_port_printk(ap, KERN_ERR, "CPB error, stat=0x%x\n", status);
794 if ((status & NV_ADMA_STAT_DONE) || have_global_err) {
795 /** Check CPBs for completed commands */
797 if(ata_tag_valid(ap->active_tag))
798 /* Non-NCQ command */
799 nv_adma_check_cpb(ap, ap->active_tag, have_global_err ||
800 (notifier_error & (1 << ap->active_tag)));
803 u32 active = ap->sactive;
804 while( (pos = ffs(active)) ) {
806 nv_adma_check_cpb(ap, pos, have_global_err ||
807 (notifier_error & (1 << pos)) );
808 active &= ~(1 << pos );
813 handled++; /* irq handled if we got here */
817 if(notifier_clears[0] || notifier_clears[1]) {
818 /* Note: Both notifier clear registers must be written
819 if either is set, even if one is zero, according to NVIDIA. */
820 writel(notifier_clears[0],
821 nv_adma_notifier_clear_block(host->ports[0]));
822 writel(notifier_clears[1],
823 nv_adma_notifier_clear_block(host->ports[1]));
826 spin_unlock(&host->lock);
828 return IRQ_RETVAL(handled);
831 static void nv_adma_irq_clear(struct ata_port *ap)
833 void __iomem *mmio = nv_adma_ctl_block(ap);
834 u16 status = readw(mmio + NV_ADMA_STAT);
835 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
836 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
837 unsigned long dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
839 /* clear ADMA status */
840 writew(status, mmio + NV_ADMA_STAT);
841 writel(notifier | notifier_error,
842 nv_adma_notifier_clear_block(ap));
844 /** clear legacy status */
845 outb(inb(dma_stat_addr), dma_stat_addr);
848 static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
850 struct ata_port *ap = qc->ap;
851 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
852 struct nv_adma_port_priv *pp = ap->private_data;
855 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
860 /* load PRD table addr. */
861 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
863 /* specify data direction, triple-check start bit is clear */
864 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
865 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
867 dmactl |= ATA_DMA_WR;
869 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
871 /* issue r/w command */
872 ata_exec_command(ap, &qc->tf);
875 static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
877 struct ata_port *ap = qc->ap;
878 struct nv_adma_port_priv *pp = ap->private_data;
881 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
886 /* start host DMA transaction */
887 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
888 outb(dmactl | ATA_DMA_START,
889 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
892 static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
894 struct ata_port *ap = qc->ap;
895 struct nv_adma_port_priv *pp = ap->private_data;
897 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
900 /* clear start/stop bit */
901 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
902 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
904 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
905 ata_altstatus(ap); /* dummy read */
908 static u8 nv_adma_bmdma_status(struct ata_port *ap)
910 struct nv_adma_port_priv *pp = ap->private_data;
912 WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));
914 return inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
917 static int nv_adma_port_start(struct ata_port *ap)
919 struct device *dev = ap->host->dev;
920 struct nv_adma_port_priv *pp;
924 void __iomem *mmio = nv_adma_ctl_block(ap);
929 rc = ata_port_start(ap);
933 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
939 mem = dma_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
940 &mem_dma, GFP_KERNEL);
946 memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
949 * First item in chunk of DMA memory:
950 * 128-byte command parameter block (CPB)
951 * one for each command tag
954 pp->cpb_dma = mem_dma;
956 writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
957 writel((mem_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
959 mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
960 mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
963 * Second item: block of ADMA_SGTBL_LEN s/g entries
966 pp->aprd_dma = mem_dma;
968 ap->private_data = pp;
970 /* clear any outstanding interrupt conditions */
971 writew(0xffff, mmio + NV_ADMA_STAT);
973 /* initialize port variables */
974 pp->flags = NV_ADMA_PORT_REGISTER_MODE;
976 /* clear CPB fetch count */
977 writew(0, mmio + NV_ADMA_CPB_COUNT);
979 /* clear GO for register mode */
980 tmp = readw(mmio + NV_ADMA_CTL);
981 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
983 tmp = readw(mmio + NV_ADMA_CTL);
984 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
985 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
987 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
988 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
999 static void nv_adma_port_stop(struct ata_port *ap)
1001 struct device *dev = ap->host->dev;
1002 struct nv_adma_port_priv *pp = ap->private_data;
1003 void __iomem *mmio = nv_adma_ctl_block(ap);
1007 writew(0, mmio + NV_ADMA_CTL);
1009 ap->private_data = NULL;
1010 dma_free_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ, pp->cpb, pp->cpb_dma);
1016 static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
1018 void __iomem *mmio = probe_ent->mmio_base;
1019 struct ata_ioports *ioport = &probe_ent->port[port];
1023 mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;
1025 ioport->cmd_addr = (unsigned long) mmio;
1026 ioport->data_addr = (unsigned long) mmio + (ATA_REG_DATA * 4);
1027 ioport->error_addr =
1028 ioport->feature_addr = (unsigned long) mmio + (ATA_REG_ERR * 4);
1029 ioport->nsect_addr = (unsigned long) mmio + (ATA_REG_NSECT * 4);
1030 ioport->lbal_addr = (unsigned long) mmio + (ATA_REG_LBAL * 4);
1031 ioport->lbam_addr = (unsigned long) mmio + (ATA_REG_LBAM * 4);
1032 ioport->lbah_addr = (unsigned long) mmio + (ATA_REG_LBAH * 4);
1033 ioport->device_addr = (unsigned long) mmio + (ATA_REG_DEVICE * 4);
1034 ioport->status_addr =
1035 ioport->command_addr = (unsigned long) mmio + (ATA_REG_STATUS * 4);
1036 ioport->altstatus_addr =
1037 ioport->ctl_addr = (unsigned long) mmio + 0x20;
1040 static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
1042 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1048 /* enable ADMA on the ports */
1049 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1050 tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1051 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1052 NV_MCP_SATA_CFG_20_PORT1_EN |
1053 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1055 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1057 for (i = 0; i < probe_ent->n_ports; i++)
1058 nv_adma_setup_port(probe_ent, i);
1060 for (i = 0; i < probe_ent->n_ports; i++) {
1061 void __iomem *mmio = __nv_adma_ctl_block(probe_ent->mmio_base, i);
1064 /* enable interrupt, clear reset if not already clear */
1065 tmp = readw(mmio + NV_ADMA_CTL);
1066 writew(tmp | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
1072 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1073 struct scatterlist *sg,
1075 struct nv_adma_prd *aprd)
1079 memset(aprd, 0, sizeof(struct nv_adma_prd));
1082 if (qc->tf.flags & ATA_TFLAG_WRITE)
1083 flags |= NV_APRD_WRITE;
1084 if (idx == qc->n_elem - 1)
1085 flags |= NV_APRD_END;
1087 flags |= NV_APRD_CONT;
1089 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1090 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1091 aprd->flags = flags;
1094 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1096 struct nv_adma_port_priv *pp = qc->ap->private_data;
1098 struct nv_adma_prd *aprd;
1099 struct scatterlist *sg;
1105 ata_for_each_sg(sg, qc) {
1106 aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
1107 nv_adma_fill_aprd(qc, sg, idx, aprd);
1111 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1114 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1116 struct nv_adma_port_priv *pp = qc->ap->private_data;
1117 struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1118 u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1119 NV_CPB_CTL_APRD_VALID |
1122 VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1124 if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
1125 (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
1126 nv_adma_register_mode(qc->ap);
1131 memset(cpb, 0, sizeof(struct nv_adma_cpb));
1135 cpb->next_cpb_idx = 0;
1137 /* turn on NCQ flags for NCQ commands */
1138 if (qc->tf.protocol == ATA_PROT_NCQ)
1139 ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1141 nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1143 nv_adma_fill_sg(qc, cpb);
1145 /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
1146 finished filling in all of the contents */
1148 cpb->ctl_flags = ctl_flags;
/* ->qc_issue for ADMA: hand a prepared CPB to the controller by writing
 * its tag to the APPEND register.  Commands that were prepped for the
 * legacy interface are issued through ata_qc_issue_prot() instead.
 * Returns 0 on success (same contract as ata_qc_issue_prot).
 */
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = nv_adma_ctl_block(qc->ap);
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/* use ATA register mode */
		VPRINTK("no dmamap or ATAPI, using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if(curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and non-NCQ
		   commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n",qc->tag);

	return 0;
}
/* IRQ handler for the generic (pre-nForce2) flavor: walk every port and
 * dispatch to the libata BMDMA interrupt helper.  If a port has no
 * non-polling command in flight, read the status register anyway so a
 * stale pending interrupt gets acknowledged.
 */
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
/* Shared interrupt body for nForce2/3 and CK804 handlers: @irq_stat packs
 * per-port status nibbles, so shift it down by NV_INT_PORT_SHIFT after
 * each port is serviced by nv_host_intr().
 */
static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		/* expose the next port's status bits */
		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}
/* IRQ handler for nForce2/3: the interrupt status byte lives in I/O port
 * space relative to port 0's SCR base; fetch it and delegate.
 */
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = inb(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
/* IRQ handler for CK804/MCP04: same as the nForce2 path except the
 * interrupt status byte is read from MMIO (BAR5) instead of port I/O.
 */
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
/* Read a SATA SCR register (SStatus/SError/SControl); registers are laid
 * out at 4-byte strides from the port's scr_addr.  Out-of-range indices
 * return all-ones, the conventional "no register" value.
 */
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
	if (sc_reg > SCR_CONTROL)
		return 0xffffffffU;

	return ioread32((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
}
/* Write a SATA SCR register; silently ignore out-of-range indices to
 * mirror nv_scr_read's bounds check.
 */
static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return;

	iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
}
/* EH ->freeze for nForce2/3: mask this port's bits in the shared
 * interrupt-enable byte (port I/O space, addressed off port 0's SCR base).
 */
static void nv_nf2_freeze(struct ata_port *ap)
{
	unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = inb(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	outb(mask, scr_addr + NV_INT_ENABLE);
}
/* EH ->thaw for nForce2/3: acknowledge any interrupts that latched while
 * frozen (write-1-to-clear on NV_INT_STATUS), then re-enable this port's
 * interrupt bits.
 */
static void nv_nf2_thaw(struct ata_port *ap)
{
	unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	/* clear anything pending before unmasking */
	outb(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = inb(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	outb(mask, scr_addr + NV_INT_ENABLE);
}
/* EH ->freeze for CK804/MCP04: same masking as nv_nf2_freeze but through
 * the MMIO (BAR5) interrupt-enable register.
 */
static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->mmio_base;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
/* EH ->thaw for CK804/MCP04: clear latched interrupt status for this
 * port, then re-enable its interrupt bits (MMIO counterpart of
 * nv_nf2_thaw).
 */
static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->mmio_base;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	/* ack pending interrupts before unmasking */
	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
/* Hardreset wrapper: run the standard SATA hardreset but discard the
 * device classification (see comment below for why).  @class is left
 * untouched for the caller.
 */
static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy);
}
/* ->error_handler for the legacy (non-ADMA) interface: standard BMDMA EH
 * with the signature-ignoring nv_hardreset substituted in.
 */
static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
/* ->error_handler for ADMA ports.  If the port is still in ADMA mode,
 * dump the controller state for diagnosis, drop back to register mode,
 * invalidate all CPBs, and pulse the ADMA channel reset before handing
 * off to the standard BMDMA error-handling path.
 */
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = nv_adma_ctl_block(ap);
		int i;
		u16 tmp;

		/* snapshot controller state for the error report */
		u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
		u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
		u32 gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
		u32 status = readw(mmio + NV_ADMA_STAT);

		ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
			"notifier_error 0x%X gen_ctl 0x%X status 0x%X\n",
			notifier, notifier_error, gen_ctl, status);

		/* dump every CPB that still has something in flight */
		for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
			struct nv_adma_cpb *cpb = &pp->cpb[i];
			if( cpb->ctl_flags || cpb->resp_flags )
				ata_port_printk(ap, KERN_ERR,
					"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
					i, cpb->ctl_flags, cpb->resp_flags);
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		ata_port_printk(ap, KERN_ERR, "Resetting port\n");

		/* Mark all of the CPBs as invalid to prevent them from being executed */
		for( i=0;i<NV_ADMA_MAX_CPBS;i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* pulse the ADMA channel reset bit */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl( mmio + NV_ADMA_CTL );	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl( mmio + NV_ADMA_CTL );	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
1394 static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1396 static int printed_version = 0;
1397 struct ata_port_info *ppi[2];
1398 struct ata_probe_ent *probe_ent;
1399 int pci_dev_busy = 0;
1403 unsigned long type = ent->driver_data;
1406 // Make sure this is a SATA controller by counting the number of bars
1407 // (NVIDIA SATA controllers will always have six bars). Otherwise,
1408 // it's an IDE controller and we ignore it.
1409 for (bar=0; bar<6; bar++)
1410 if (pci_resource_start(pdev, bar) == 0)
1413 if ( !printed_version++)
1414 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1416 rc = pci_enable_device(pdev);
1420 rc = pci_request_regions(pdev, DRV_NAME);
1423 goto err_out_disable;
1426 if(type >= CK804 && adma_enabled) {
1427 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
1429 if(!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
1430 !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
1435 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1437 goto err_out_regions;
1438 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1440 goto err_out_regions;
1445 ppi[0] = ppi[1] = &nv_port_info[type];
1446 probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
1448 goto err_out_regions;
1450 probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
1451 if (!probe_ent->mmio_base) {
1453 goto err_out_free_ent;
1456 base = (unsigned long)probe_ent->mmio_base;
1458 probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
1459 probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
1461 /* enable SATA space for CK804 */
1462 if (type >= CK804) {
1465 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val);
1466 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
1467 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
1470 pci_set_master(pdev);
1473 rc = nv_adma_host_init(probe_ent);
1475 goto err_out_iounmap;
1478 rc = ata_device_add(probe_ent);
1480 goto err_out_iounmap;
1487 pci_iounmap(pdev, probe_ent->mmio_base);
1491 pci_release_regions(pdev);
1494 pci_disable_device(pdev);
1499 static void nv_ck804_host_stop(struct ata_host *host)
1501 struct pci_dev *pdev = to_pci_dev(host->dev);
1504 /* disable SATA space for CK804 */
1505 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val);
1506 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
1507 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
1509 ata_pci_host_stop(host);
/* ->host_stop for ADMA hosts: silence ADMA interrupts on every port,
 * disable the ADMA engines in PCI config space, then finish with the
 * CK804 teardown (which also disables the SATA register space).
 */
static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int i;
	u32 tmp32;

	for (i = 0; i < host->n_ports; i++) {
		void __iomem *mmio = __nv_adma_ctl_block(host->mmio_base, i);
		u16 tmp;

		/* disable interrupt */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp & ~NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
	}

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}
/* Module init: register the PCI driver; probing happens via nv_init_one. */
static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}
/* Module exit: unregister the PCI driver, detaching all bound devices. */
static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}
module_init(nv_init);
module_exit(nv_exit);
/* "adma" module parameter (read-only after load) gates use of the
 * CK804/MCP04 ADMA interface in nv_init_one(). */
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");