1 /* radeon_state.c -- State support for Radeon -*- linux-c -*-
3 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
26 * Gareth Hughes <gareth@valinux.com>
27 * Kevin E. Martin <martin@valinux.com>
33 #include "drm_sarea.h"
34 #include "radeon_drm.h"
35 #include "radeon_drv.h"
38 /* ================================================================
39 * Helper functions for client state checking and fixup
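 *
 * Offsets handed in by the client are only trusted once they fall inside
 * the framebuffer/GART aperture; anything outside it is relocated by the
 * per-file radeon_fb_delta and rejected if it still lands outside.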
42 static __inline__ int radeon_check_and_fixup_offset( drm_radeon_private_t *dev_priv,
43 drm_file_t *filp_priv,
46 struct drm_radeon_driver_file_fields *radeon_priv;
48 if ( off >= dev_priv->fb_location &&
49 off < ( dev_priv->gart_vm_start + dev_priv->gart_size ) )
52 radeon_priv = filp_priv->driver_priv;
53 off += radeon_priv->radeon_fb_delta;
55 DRM_DEBUG( "offset fixed up to 0x%x\n", off );
57 if ( off < dev_priv->fb_location ||
58 off >= ( dev_priv->gart_vm_start + dev_priv->gart_size ) )
59 return DRM_ERR( EINVAL );
66 static __inline__ int radeon_check_and_fixup_offset_user( drm_radeon_private_t *dev_priv,
67 drm_file_t *filp_priv,
68 u32 __user *offset ) {
71 DRM_GET_USER_UNCHECKED( off, offset );
73 if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &off ) )
74 return DRM_ERR( EINVAL );
76 DRM_PUT_USER_UNCHECKED( offset, off );
81 static __inline__ int radeon_check_and_fixup_packets( drm_radeon_private_t *dev_priv,
82 drm_file_t *filp_priv,
87 case RADEON_EMIT_PP_MISC:
88 if ( radeon_check_and_fixup_offset_user( dev_priv, filp_priv,
89 &data[( RADEON_RB3D_DEPTHOFFSET
90 - RADEON_PP_MISC ) / 4] ) ) {
91 DRM_ERROR( "Invalid depth buffer offset\n" );
92 return DRM_ERR( EINVAL );
96 case RADEON_EMIT_PP_CNTL:
97 if ( radeon_check_and_fixup_offset_user( dev_priv, filp_priv,
98 &data[( RADEON_RB3D_COLOROFFSET
99 - RADEON_PP_CNTL ) / 4] ) ) {
100 DRM_ERROR( "Invalid colour buffer offset\n" );
101 return DRM_ERR( EINVAL );
105 case R200_EMIT_PP_TXOFFSET_0:
106 case R200_EMIT_PP_TXOFFSET_1:
107 case R200_EMIT_PP_TXOFFSET_2:
108 case R200_EMIT_PP_TXOFFSET_3:
109 case R200_EMIT_PP_TXOFFSET_4:
110 case R200_EMIT_PP_TXOFFSET_5:
111 if ( radeon_check_and_fixup_offset_user( dev_priv, filp_priv,
113 DRM_ERROR( "Invalid R200 texture offset\n" );
114 return DRM_ERR( EINVAL );
118 case RADEON_EMIT_PP_TXFILTER_0:
119 case RADEON_EMIT_PP_TXFILTER_1:
120 case RADEON_EMIT_PP_TXFILTER_2:
121 if ( radeon_check_and_fixup_offset_user( dev_priv, filp_priv,
122 &data[( RADEON_PP_TXOFFSET_0
123 - RADEON_PP_TXFILTER_0 ) / 4] ) ) {
124 DRM_ERROR( "Invalid R100 texture offset\n" );
125 return DRM_ERR( EINVAL );
129 case R200_EMIT_PP_CUBIC_OFFSETS_0:
130 case R200_EMIT_PP_CUBIC_OFFSETS_1:
131 case R200_EMIT_PP_CUBIC_OFFSETS_2:
132 case R200_EMIT_PP_CUBIC_OFFSETS_3:
133 case R200_EMIT_PP_CUBIC_OFFSETS_4:
134 case R200_EMIT_PP_CUBIC_OFFSETS_5: {
136 for ( i = 0; i < 5; i++ ) {
137 if ( radeon_check_and_fixup_offset_user( dev_priv,
140 DRM_ERROR( "Invalid R200 cubic texture offset\n" );
141 return DRM_ERR( EINVAL );
147 case RADEON_EMIT_RB3D_COLORPITCH:
148 case RADEON_EMIT_RE_LINE_PATTERN:
149 case RADEON_EMIT_SE_LINE_WIDTH:
150 case RADEON_EMIT_PP_LUM_MATRIX:
151 case RADEON_EMIT_PP_ROT_MATRIX_0:
152 case RADEON_EMIT_RB3D_STENCILREFMASK:
153 case RADEON_EMIT_SE_VPORT_XSCALE:
154 case RADEON_EMIT_SE_CNTL:
155 case RADEON_EMIT_SE_CNTL_STATUS:
156 case RADEON_EMIT_RE_MISC:
157 case RADEON_EMIT_PP_BORDER_COLOR_0:
158 case RADEON_EMIT_PP_BORDER_COLOR_1:
159 case RADEON_EMIT_PP_BORDER_COLOR_2:
160 case RADEON_EMIT_SE_ZBIAS_FACTOR:
161 case RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT:
162 case RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED:
163 case R200_EMIT_PP_TXCBLEND_0:
164 case R200_EMIT_PP_TXCBLEND_1:
165 case R200_EMIT_PP_TXCBLEND_2:
166 case R200_EMIT_PP_TXCBLEND_3:
167 case R200_EMIT_PP_TXCBLEND_4:
168 case R200_EMIT_PP_TXCBLEND_5:
169 case R200_EMIT_PP_TXCBLEND_6:
170 case R200_EMIT_PP_TXCBLEND_7:
171 case R200_EMIT_TCL_LIGHT_MODEL_CTL_0:
172 case R200_EMIT_TFACTOR_0:
173 case R200_EMIT_VTX_FMT_0:
174 case R200_EMIT_VAP_CTL:
175 case R200_EMIT_MATRIX_SELECT_0:
176 case R200_EMIT_TEX_PROC_CTL_2:
177 case R200_EMIT_TCL_UCP_VERT_BLEND_CTL:
178 case R200_EMIT_PP_TXFILTER_0:
179 case R200_EMIT_PP_TXFILTER_1:
180 case R200_EMIT_PP_TXFILTER_2:
181 case R200_EMIT_PP_TXFILTER_3:
182 case R200_EMIT_PP_TXFILTER_4:
183 case R200_EMIT_PP_TXFILTER_5:
184 case R200_EMIT_VTE_CNTL:
185 case R200_EMIT_OUTPUT_VTX_COMP_SEL:
186 case R200_EMIT_PP_TAM_DEBUG3:
187 case R200_EMIT_PP_CNTL_X:
188 case R200_EMIT_RB3D_DEPTHXY_OFFSET:
189 case R200_EMIT_RE_AUX_SCISSOR_CNTL:
190 case R200_EMIT_RE_SCISSOR_TL_0:
191 case R200_EMIT_RE_SCISSOR_TL_1:
192 case R200_EMIT_RE_SCISSOR_TL_2:
193 case R200_EMIT_SE_VAP_CNTL_STATUS:
194 case R200_EMIT_SE_VTX_STATE_CNTL:
195 case R200_EMIT_RE_POINTSIZE:
196 case R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0:
197 case R200_EMIT_PP_CUBIC_FACES_0:
198 case R200_EMIT_PP_CUBIC_FACES_1:
199 case R200_EMIT_PP_CUBIC_FACES_2:
200 case R200_EMIT_PP_CUBIC_FACES_3:
201 case R200_EMIT_PP_CUBIC_FACES_4:
202 case R200_EMIT_PP_CUBIC_FACES_5:
203 case RADEON_EMIT_PP_TEX_SIZE_0:
204 case RADEON_EMIT_PP_TEX_SIZE_1:
205 case RADEON_EMIT_PP_TEX_SIZE_2:
206 case R200_EMIT_RB3D_BLENDCOLOR:
207 /* These packets don't contain memory offsets */
211 DRM_ERROR( "Unknown state packet ID %d\n", id );
212 return DRM_ERR( EINVAL );
218 static __inline__ int radeon_check_and_fixup_packet3( drm_radeon_private_t *dev_priv,
219 drm_file_t *filp_priv,
220 drm_radeon_cmd_buffer_t *cmdbuf,
221 unsigned int *cmdsz ) {
223 u32 __user *cmd = (u32 __user *)cmdbuf->buf;
225 if ( DRM_COPY_FROM_USER_UNCHECKED( tmp, cmd, sizeof( tmp ) ) ) {
226 DRM_ERROR( "Failed to copy data from user space\n" );
227 return DRM_ERR( EFAULT );
230 *cmdsz = 2 + ( ( tmp[0] & RADEON_CP_PACKET_COUNT_MASK ) >> 16 );
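/* The type-3 header's count field in bits [29:16] holds the number of
 * dwords following the header minus one, so the whole packet, header
 * included, is count + 2 dwords.
 */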
232 if ( ( tmp[0] & 0xc0000000 ) != RADEON_CP_PACKET3 ) {
233 DRM_ERROR( "Not a type 3 packet\n" );
234 return DRM_ERR( EINVAL );
237 if ( 4 * *cmdsz > cmdbuf->bufsz ) {
238 DRM_ERROR( "Packet size larger than size of data provided\n" );
239 return DRM_ERR( EINVAL );
242 /* Check client state and fix it up if necessary */
243 if ( tmp[0] & 0x8000 ) { /* MSB of opcode: next DWORD GUI_CNTL */
246 if ( tmp[1] & ( RADEON_GMC_SRC_PITCH_OFFSET_CNTL
247 | RADEON_GMC_DST_PITCH_OFFSET_CNTL ) ) {
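/* The low 22 bits of the PITCH_OFFSET dword carry the offset in 1KB
 * units; the pitch field in the upper bits is left untouched below.
 */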
248 offset = tmp[2] << 10;
249 if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &offset ) ) {
250 DRM_ERROR( "Invalid first packet offset\n" );
251 return DRM_ERR( EINVAL );
253 tmp[2] = ( tmp[2] & 0xffc00000 ) | offset >> 10;
256 if ( ( tmp[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL ) &&
257 ( tmp[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL ) ) {
258 offset = tmp[3] << 10;
259 if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &offset ) ) {
260 DRM_ERROR( "Invalid second packet offset\n" );
261 return DRM_ERR( EINVAL );
263 tmp[3] = ( tmp[3] & 0xffc00000 ) | offset >> 10;
266 if ( DRM_COPY_TO_USER_UNCHECKED( cmd, tmp, sizeof( tmp ) ) ) {
267 DRM_ERROR( "Failed to copy data to user space\n" );
268 return DRM_ERR( EFAULT );
276 /* ================================================================
277 * CP hardware state programming functions
280 static __inline__ void radeon_emit_clip_rect( drm_radeon_private_t *dev_priv,
281 drm_clip_rect_t *box )
285 DRM_DEBUG( " box: x1=%d y1=%d x2=%d y2=%d\n",
286 box->x1, box->y1, box->x2, box->y2 );
289 OUT_RING( CP_PACKET0( RADEON_RE_TOP_LEFT, 0 ) );
290 OUT_RING( (box->y1 << 16) | box->x1 );
291 OUT_RING( CP_PACKET0( RADEON_RE_WIDTH_HEIGHT, 0 ) );
292 OUT_RING( ((box->y2 - 1) << 16) | (box->x2 - 1) );
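/* DRM cliprects are exclusive at x2/y2, so the last covered pixel is
 * (x2 - 1, y2 - 1).
 */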
298 static int radeon_emit_state( drm_radeon_private_t *dev_priv,
299 drm_file_t *filp_priv,
300 drm_radeon_context_regs_t *ctx,
301 drm_radeon_texture_regs_t *tex,
305 DRM_DEBUG( "dirty=0x%08x\n", dirty );
307 if ( dirty & RADEON_UPLOAD_CONTEXT ) {
308 if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
309 &ctx->rb3d_depthoffset ) ) {
310 DRM_ERROR( "Invalid depth buffer offset\n" );
311 return DRM_ERR( EINVAL );
314 if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
315 &ctx->rb3d_coloroffset ) ) {
316 DRM_ERROR( "Invalid depth buffer offset\n" );
317 return DRM_ERR( EINVAL );
321 OUT_RING( CP_PACKET0( RADEON_PP_MISC, 6 ) );
322 OUT_RING( ctx->pp_misc );
323 OUT_RING( ctx->pp_fog_color );
324 OUT_RING( ctx->re_solid_color );
325 OUT_RING( ctx->rb3d_blendcntl );
326 OUT_RING( ctx->rb3d_depthoffset );
327 OUT_RING( ctx->rb3d_depthpitch );
328 OUT_RING( ctx->rb3d_zstencilcntl );
329 OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 2 ) );
330 OUT_RING( ctx->pp_cntl );
331 OUT_RING( ctx->rb3d_cntl );
332 OUT_RING( ctx->rb3d_coloroffset );
333 OUT_RING( CP_PACKET0( RADEON_RB3D_COLORPITCH, 0 ) );
334 OUT_RING( ctx->rb3d_colorpitch );
338 if ( dirty & RADEON_UPLOAD_VERTFMT ) {
340 OUT_RING( CP_PACKET0( RADEON_SE_COORD_FMT, 0 ) );
341 OUT_RING( ctx->se_coord_fmt );
345 if ( dirty & RADEON_UPLOAD_LINE ) {
347 OUT_RING( CP_PACKET0( RADEON_RE_LINE_PATTERN, 1 ) );
348 OUT_RING( ctx->re_line_pattern );
349 OUT_RING( ctx->re_line_state );
350 OUT_RING( CP_PACKET0( RADEON_SE_LINE_WIDTH, 0 ) );
351 OUT_RING( ctx->se_line_width );
355 if ( dirty & RADEON_UPLOAD_BUMPMAP ) {
357 OUT_RING( CP_PACKET0( RADEON_PP_LUM_MATRIX, 0 ) );
358 OUT_RING( ctx->pp_lum_matrix );
359 OUT_RING( CP_PACKET0( RADEON_PP_ROT_MATRIX_0, 1 ) );
360 OUT_RING( ctx->pp_rot_matrix_0 );
361 OUT_RING( ctx->pp_rot_matrix_1 );
365 if ( dirty & RADEON_UPLOAD_MASKS ) {
367 OUT_RING( CP_PACKET0( RADEON_RB3D_STENCILREFMASK, 2 ) );
368 OUT_RING( ctx->rb3d_stencilrefmask );
369 OUT_RING( ctx->rb3d_ropcntl );
370 OUT_RING( ctx->rb3d_planemask );
374 if ( dirty & RADEON_UPLOAD_VIEWPORT ) {
376 OUT_RING( CP_PACKET0( RADEON_SE_VPORT_XSCALE, 5 ) );
377 OUT_RING( ctx->se_vport_xscale );
378 OUT_RING( ctx->se_vport_xoffset );
379 OUT_RING( ctx->se_vport_yscale );
380 OUT_RING( ctx->se_vport_yoffset );
381 OUT_RING( ctx->se_vport_zscale );
382 OUT_RING( ctx->se_vport_zoffset );
386 if ( dirty & RADEON_UPLOAD_SETUP ) {
388 OUT_RING( CP_PACKET0( RADEON_SE_CNTL, 0 ) );
389 OUT_RING( ctx->se_cntl );
390 OUT_RING( CP_PACKET0( RADEON_SE_CNTL_STATUS, 0 ) );
391 OUT_RING( ctx->se_cntl_status );
395 if ( dirty & RADEON_UPLOAD_MISC ) {
397 OUT_RING( CP_PACKET0( RADEON_RE_MISC, 0 ) );
398 OUT_RING( ctx->re_misc );
402 if ( dirty & RADEON_UPLOAD_TEX0 ) {
403 if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
404 &tex[0].pp_txoffset ) ) {
405 DRM_ERROR( "Invalid texture offset for unit 0\n" );
406 return DRM_ERR( EINVAL );
410 OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_0, 5 ) );
411 OUT_RING( tex[0].pp_txfilter );
412 OUT_RING( tex[0].pp_txformat );
413 OUT_RING( tex[0].pp_txoffset );
414 OUT_RING( tex[0].pp_txcblend );
415 OUT_RING( tex[0].pp_txablend );
416 OUT_RING( tex[0].pp_tfactor );
417 OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_0, 0 ) );
418 OUT_RING( tex[0].pp_border_color );
422 if ( dirty & RADEON_UPLOAD_TEX1 ) {
423 if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
424 &tex[1].pp_txoffset ) ) {
425 DRM_ERROR( "Invalid texture offset for unit 1\n" );
426 return DRM_ERR( EINVAL );
430 OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_1, 5 ) );
431 OUT_RING( tex[1].pp_txfilter );
432 OUT_RING( tex[1].pp_txformat );
433 OUT_RING( tex[1].pp_txoffset );
434 OUT_RING( tex[1].pp_txcblend );
435 OUT_RING( tex[1].pp_txablend );
436 OUT_RING( tex[1].pp_tfactor );
437 OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_1, 0 ) );
438 OUT_RING( tex[1].pp_border_color );
442 if ( dirty & RADEON_UPLOAD_TEX2 ) {
443 if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
444 &tex[2].pp_txoffset ) ) {
445 DRM_ERROR( "Invalid texture offset for unit 2\n" );
446 return DRM_ERR( EINVAL );
450 OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_2, 5 ) );
451 OUT_RING( tex[2].pp_txfilter );
452 OUT_RING( tex[2].pp_txformat );
453 OUT_RING( tex[2].pp_txoffset );
454 OUT_RING( tex[2].pp_txcblend );
455 OUT_RING( tex[2].pp_txablend );
456 OUT_RING( tex[2].pp_tfactor );
457 OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_2, 0 ) );
458 OUT_RING( tex[2].pp_border_color );
467 static int radeon_emit_state2( drm_radeon_private_t *dev_priv,
468 drm_file_t *filp_priv,
469 drm_radeon_state_t *state )
473 if (state->dirty & RADEON_UPLOAD_ZBIAS) {
475 OUT_RING( CP_PACKET0( RADEON_SE_ZBIAS_FACTOR, 1 ) );
476 OUT_RING( state->context2.se_zbias_factor );
477 OUT_RING( state->context2.se_zbias_constant );
481 return radeon_emit_state( dev_priv, filp_priv, &state->context,
482 state->tex, state->dirty );
485 /* New (1.3) state mechanism. 3 commands (packet, scalar, vector) in
486 * 1.3 cmdbuffers allow all previous state to be updated as well as
487 * the tcl scalar and vector areas.
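 *
 * The packet[] table below gives, for each RADEON_EMIT_* / R200_EMIT_*
 * id, the first register written and the number of dwords that follow;
 * radeon_emit_packets() indexes it by the packet_id in the command header.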
493 } packet[RADEON_MAX_STATE_PACKETS] = {
494 { RADEON_PP_MISC, 7, "RADEON_PP_MISC" },
495 { RADEON_PP_CNTL, 3, "RADEON_PP_CNTL" },
496 { RADEON_RB3D_COLORPITCH, 1, "RADEON_RB3D_COLORPITCH" },
497 { RADEON_RE_LINE_PATTERN, 2, "RADEON_RE_LINE_PATTERN" },
498 { RADEON_SE_LINE_WIDTH, 1, "RADEON_SE_LINE_WIDTH" },
499 { RADEON_PP_LUM_MATRIX, 1, "RADEON_PP_LUM_MATRIX" },
500 { RADEON_PP_ROT_MATRIX_0, 2, "RADEON_PP_ROT_MATRIX_0" },
501 { RADEON_RB3D_STENCILREFMASK, 3, "RADEON_RB3D_STENCILREFMASK" },
502 { RADEON_SE_VPORT_XSCALE, 6, "RADEON_SE_VPORT_XSCALE" },
503 { RADEON_SE_CNTL, 2, "RADEON_SE_CNTL" },
504 { RADEON_SE_CNTL_STATUS, 1, "RADEON_SE_CNTL_STATUS" },
505 { RADEON_RE_MISC, 1, "RADEON_RE_MISC" },
506 { RADEON_PP_TXFILTER_0, 6, "RADEON_PP_TXFILTER_0" },
507 { RADEON_PP_BORDER_COLOR_0, 1, "RADEON_PP_BORDER_COLOR_0" },
508 { RADEON_PP_TXFILTER_1, 6, "RADEON_PP_TXFILTER_1" },
509 { RADEON_PP_BORDER_COLOR_1, 1, "RADEON_PP_BORDER_COLOR_1" },
510 { RADEON_PP_TXFILTER_2, 6, "RADEON_PP_TXFILTER_2" },
511 { RADEON_PP_BORDER_COLOR_2, 1, "RADEON_PP_BORDER_COLOR_2" },
512 { RADEON_SE_ZBIAS_FACTOR, 2, "RADEON_SE_ZBIAS_FACTOR" },
513 { RADEON_SE_TCL_OUTPUT_VTX_FMT, 11, "RADEON_SE_TCL_OUTPUT_VTX_FMT" },
514 { RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 17, "RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED" },
515 { R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0" },
516 { R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1" },
517 { R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2" },
518 { R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3" },
519 { R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4" },
520 { R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5" },
521 { R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6" },
522 { R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7" },
523 { R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0" },
524 { R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0" },
525 { R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0" },
526 { R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL" },
527 { R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0" },
528 { R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2" },
529 { R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL" },
530 { R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0" },
531 { R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1" },
532 { R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2" },
533 { R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3" },
534 { R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4" },
535 { R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5" },
536 { R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0" },
537 { R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1" },
538 { R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2" },
539 { R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3" },
540 { R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4" },
541 { R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5" },
542 { R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL" },
543 { R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1, "R200_SE_TCL_OUTPUT_VTX_COMP_SEL" },
544 { R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3" },
545 { R200_PP_CNTL_X, 1, "R200_PP_CNTL_X" },
546 { R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET" },
547 { R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL" },
548 { R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0" },
549 { R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1" },
550 { R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2" },
551 { R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS" },
552 { R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL" },
553 { R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE" },
554 { R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4, "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0" },
555 { R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0" }, /* 61 */
556 { R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0" }, /* 62 */
557 { R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1" },
558 { R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1" },
559 { R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2" },
560 { R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2" },
561 { R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3" },
562 { R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3" },
563 { R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4" },
564 { R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4" },
565 { R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5" },
566 { R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5" },
567 { RADEON_PP_TEX_SIZE_0, 2, "RADEON_PP_TEX_SIZE_0" },
568 { RADEON_PP_TEX_SIZE_1, 2, "RADEON_PP_TEX_SIZE_1" },
569 { RADEON_PP_TEX_SIZE_2, 2, "RADEON_PP_TEX_SIZE_2" },
570 { R200_RB3D_BLENDCOLOR, 3, "R200_RB3D_BLENDCOLOR" },
575 /* ================================================================
576 * Performance monitoring functions
579 static void radeon_clear_box( drm_radeon_private_t *dev_priv,
580 int x, int y, int w, int h,
581 int r, int g, int b )
586 x += dev_priv->sarea_priv->boxes[0].x1;
587 y += dev_priv->sarea_priv->boxes[0].y1;
589 switch ( dev_priv->color_fmt ) {
590 case RADEON_COLOR_FORMAT_RGB565:
591 color = (((r & 0xf8) << 8) |
595 case RADEON_COLOR_FORMAT_ARGB8888:
597 color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
602 RADEON_WAIT_UNTIL_3D_IDLE();
603 OUT_RING( CP_PACKET0( RADEON_DP_WRITE_MASK, 0 ) );
604 OUT_RING( 0xffffffff );
609 OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) );
610 OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL |
611 RADEON_GMC_BRUSH_SOLID_COLOR |
612 (dev_priv->color_fmt << 8) |
613 RADEON_GMC_SRC_DATATYPE_COLOR |
615 RADEON_GMC_CLR_CMP_CNTL_DIS );
617 if ( dev_priv->page_flipping && dev_priv->current_page == 1 ) {
618 OUT_RING( dev_priv->front_pitch_offset );
620 OUT_RING( dev_priv->back_pitch_offset );
625 OUT_RING( (x << 16) | y );
626 OUT_RING( (w << 16) | h );
631 static void radeon_cp_performance_boxes( drm_radeon_private_t *dev_priv )
633 /* Collapse various things into a wait flag -- trying to
634 * guess if userspace slept -- better just to have them tell us.
636 if (dev_priv->stats.last_frame_reads > 1 ||
637 dev_priv->stats.last_clear_reads > dev_priv->stats.clears) {
638 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
641 if (dev_priv->stats.freelist_loops) {
642 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
645 /* Purple box for page flipping
647 if ( dev_priv->stats.boxes & RADEON_BOX_FLIP )
648 radeon_clear_box( dev_priv, 4, 4, 8, 8, 255, 0, 255 );
650 /* Red box if we have to wait for idle at any point
652 if ( dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE )
653 radeon_clear_box( dev_priv, 16, 4, 8, 8, 255, 0, 0 );
655 /* Blue box: lost context?
658 /* Yellow box for texture swaps
660 if ( dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD )
661 radeon_clear_box( dev_priv, 40, 4, 8, 8, 255, 255, 0 );
663 /* Green box if hardware never idles (as far as we can tell)
665 if ( !(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE) )
666 radeon_clear_box( dev_priv, 64, 4, 8, 8, 0, 255, 0 );
669 /* Draw bars indicating number of buffers allocated
670 * (not a great measure, easily confused)
672 if (dev_priv->stats.requested_bufs) {
673 if (dev_priv->stats.requested_bufs > 100)
674 dev_priv->stats.requested_bufs = 100;
676 radeon_clear_box( dev_priv, 4, 16,
677 dev_priv->stats.requested_bufs, 4,
681 memset( &dev_priv->stats, 0, sizeof(dev_priv->stats) );
684 /* ================================================================
685 * CP command dispatch functions
688 static void radeon_cp_dispatch_clear( drm_device_t *dev,
689 drm_radeon_clear_t *clear,
690 drm_radeon_clear_rect_t *depth_boxes )
692 drm_radeon_private_t *dev_priv = dev->dev_private;
693 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
694 drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
695 int nbox = sarea_priv->nbox;
696 drm_clip_rect_t *pbox = sarea_priv->boxes;
697 unsigned int flags = clear->flags;
698 u32 rb3d_cntl = 0, rb3d_stencilrefmask= 0;
701 DRM_DEBUG( "flags = 0x%x\n", flags );
703 dev_priv->stats.clears++;
705 if ( dev_priv->page_flipping && dev_priv->current_page == 1 ) {
706 unsigned int tmp = flags;
708 flags &= ~(RADEON_FRONT | RADEON_BACK);
709 if ( tmp & RADEON_FRONT ) flags |= RADEON_BACK;
710 if ( tmp & RADEON_BACK ) flags |= RADEON_FRONT;
713 if ( flags & (RADEON_FRONT | RADEON_BACK) ) {
717 /* Ensure the 3D stream is idle before doing a
718 * 2D fill to clear the front or back buffer.
720 RADEON_WAIT_UNTIL_3D_IDLE();
722 OUT_RING( CP_PACKET0( RADEON_DP_WRITE_MASK, 0 ) );
723 OUT_RING( clear->color_mask );
727 /* Make sure we restore the 3D state next time.
729 dev_priv->sarea_priv->ctx_owner = 0;
731 for ( i = 0 ; i < nbox ; i++ ) {
734 int w = pbox[i].x2 - x;
735 int h = pbox[i].y2 - y;
737 DRM_DEBUG( "dispatch clear %d,%d-%d,%d flags 0x%x\n",
740 if ( flags & RADEON_FRONT ) {
743 OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) );
744 OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL |
745 RADEON_GMC_BRUSH_SOLID_COLOR |
746 (dev_priv->color_fmt << 8) |
747 RADEON_GMC_SRC_DATATYPE_COLOR |
749 RADEON_GMC_CLR_CMP_CNTL_DIS );
751 OUT_RING( dev_priv->front_pitch_offset );
752 OUT_RING( clear->clear_color );
754 OUT_RING( (x << 16) | y );
755 OUT_RING( (w << 16) | h );
760 if ( flags & RADEON_BACK ) {
763 OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) );
764 OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL |
765 RADEON_GMC_BRUSH_SOLID_COLOR |
766 (dev_priv->color_fmt << 8) |
767 RADEON_GMC_SRC_DATATYPE_COLOR |
769 RADEON_GMC_CLR_CMP_CNTL_DIS );
771 OUT_RING( dev_priv->back_pitch_offset );
772 OUT_RING( clear->clear_color );
774 OUT_RING( (x << 16) | y );
775 OUT_RING( (w << 16) | h );
782 /* We have to clear the depth and/or stencil buffers by
783 * rendering a quad into just those buffers. Thus, we have to
784 * make sure the 3D engine is configured correctly.
786 if ( dev_priv->is_r200 &&
787 (flags & (RADEON_DEPTH | RADEON_STENCIL)) ) {
792 int tempRB3D_ZSTENCILCNTL;
793 int tempRB3D_STENCILREFMASK;
794 int tempRB3D_PLANEMASK;
797 int tempSE_VTX_FMT_0;
798 int tempSE_VTX_FMT_1;
800 int tempRE_AUX_SCISSOR_CNTL;
805 tempRB3D_CNTL = depth_clear->rb3d_cntl;
806 tempRB3D_CNTL &= ~(1<<15); /* unset radeon magic flag */
808 tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
809 tempRB3D_STENCILREFMASK = 0x0;
811 tempSE_CNTL = depth_clear->se_cntl;
817 tempSE_VAP_CNTL = (/* SE_VAP_CNTL__FORCE_W_TO_ONE_MASK | */
818 (0x9 << SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT));
820 tempRB3D_PLANEMASK = 0x0;
822 tempRE_AUX_SCISSOR_CNTL = 0x0;
825 SE_VTE_CNTL__VTX_XY_FMT_MASK |
826 SE_VTE_CNTL__VTX_Z_FMT_MASK;
828 /* Vertex format (X, Y, Z, W) */
830 SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK |
831 SE_VTX_FMT_0__VTX_W0_PRESENT_MASK;
832 tempSE_VTX_FMT_1 = 0x0;
836 * Depth buffer specific enables
838 if (flags & RADEON_DEPTH) {
839 /* Enable depth buffer */
840 tempRB3D_CNTL |= RADEON_Z_ENABLE;
842 /* Disable depth buffer */
843 tempRB3D_CNTL &= ~RADEON_Z_ENABLE;
847 * Stencil buffer specific enables
849 if ( flags & RADEON_STENCIL ) {
850 tempRB3D_CNTL |= RADEON_STENCIL_ENABLE;
851 tempRB3D_STENCILREFMASK = clear->depth_mask;
853 tempRB3D_CNTL &= ~RADEON_STENCIL_ENABLE;
854 tempRB3D_STENCILREFMASK = 0x00000000;
858 RADEON_WAIT_UNTIL_2D_IDLE();
860 OUT_RING_REG( RADEON_PP_CNTL, tempPP_CNTL );
861 OUT_RING_REG( R200_RE_CNTL, tempRE_CNTL );
862 OUT_RING_REG( RADEON_RB3D_CNTL, tempRB3D_CNTL );
863 OUT_RING_REG( RADEON_RB3D_ZSTENCILCNTL,
864 tempRB3D_ZSTENCILCNTL );
865 OUT_RING_REG( RADEON_RB3D_STENCILREFMASK,
866 tempRB3D_STENCILREFMASK );
867 OUT_RING_REG( RADEON_RB3D_PLANEMASK, tempRB3D_PLANEMASK );
868 OUT_RING_REG( RADEON_SE_CNTL, tempSE_CNTL );
869 OUT_RING_REG( R200_SE_VTE_CNTL, tempSE_VTE_CNTL );
870 OUT_RING_REG( R200_SE_VTX_FMT_0, tempSE_VTX_FMT_0 );
871 OUT_RING_REG( R200_SE_VTX_FMT_1, tempSE_VTX_FMT_1 );
872 OUT_RING_REG( R200_SE_VAP_CNTL, tempSE_VAP_CNTL );
873 OUT_RING_REG( R200_RE_AUX_SCISSOR_CNTL,
874 tempRE_AUX_SCISSOR_CNTL );
877 /* Make sure we restore the 3D state next time.
879 dev_priv->sarea_priv->ctx_owner = 0;
881 for ( i = 0 ; i < nbox ; i++ ) {
883 /* Funny that this should be required --
886 radeon_emit_clip_rect( dev_priv,
887 &sarea_priv->boxes[i] );
890 OUT_RING( CP_PACKET3( R200_3D_DRAW_IMMD_2, 12 ) );
891 OUT_RING( (RADEON_PRIM_TYPE_RECT_LIST |
892 RADEON_PRIM_WALK_RING |
893 (3 << RADEON_NUM_VERTICES_SHIFT)) );
894 OUT_RING( depth_boxes[i].ui[CLEAR_X1] );
895 OUT_RING( depth_boxes[i].ui[CLEAR_Y1] );
896 OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
897 OUT_RING( 0x3f800000 );
898 OUT_RING( depth_boxes[i].ui[CLEAR_X1] );
899 OUT_RING( depth_boxes[i].ui[CLEAR_Y2] );
900 OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
901 OUT_RING( 0x3f800000 );
902 OUT_RING( depth_boxes[i].ui[CLEAR_X2] );
903 OUT_RING( depth_boxes[i].ui[CLEAR_Y2] );
904 OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
905 OUT_RING( 0x3f800000 );
909 else if ( (flags & (RADEON_DEPTH | RADEON_STENCIL)) ) {
911 rb3d_cntl = depth_clear->rb3d_cntl;
913 if ( flags & RADEON_DEPTH ) {
914 rb3d_cntl |= RADEON_Z_ENABLE;
916 rb3d_cntl &= ~RADEON_Z_ENABLE;
919 if ( flags & RADEON_STENCIL ) {
920 rb3d_cntl |= RADEON_STENCIL_ENABLE;
921 rb3d_stencilrefmask = clear->depth_mask; /* misnamed field */
923 rb3d_cntl &= ~RADEON_STENCIL_ENABLE;
924 rb3d_stencilrefmask = 0x00000000;
928 RADEON_WAIT_UNTIL_2D_IDLE();
930 OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 1 ) );
931 OUT_RING( 0x00000000 );
932 OUT_RING( rb3d_cntl );
934 OUT_RING_REG( RADEON_RB3D_ZSTENCILCNTL,
935 depth_clear->rb3d_zstencilcntl );
936 OUT_RING_REG( RADEON_RB3D_STENCILREFMASK,
937 rb3d_stencilrefmask );
938 OUT_RING_REG( RADEON_RB3D_PLANEMASK,
940 OUT_RING_REG( RADEON_SE_CNTL,
941 depth_clear->se_cntl );
944 /* Make sure we restore the 3D state next time.
946 dev_priv->sarea_priv->ctx_owner = 0;
948 for ( i = 0 ; i < nbox ; i++ ) {
950 /* Funny that this should be required --
953 radeon_emit_clip_rect( dev_priv,
954 &sarea_priv->boxes[i] );
958 OUT_RING( CP_PACKET3( RADEON_3D_DRAW_IMMD, 13 ) );
959 OUT_RING( RADEON_VTX_Z_PRESENT |
960 RADEON_VTX_PKCOLOR_PRESENT);
961 OUT_RING( (RADEON_PRIM_TYPE_RECT_LIST |
962 RADEON_PRIM_WALK_RING |
964 RADEON_VTX_FMT_RADEON_MODE |
965 (3 << RADEON_NUM_VERTICES_SHIFT)) );
968 OUT_RING( depth_boxes[i].ui[CLEAR_X1] );
969 OUT_RING( depth_boxes[i].ui[CLEAR_Y1] );
970 OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
973 OUT_RING( depth_boxes[i].ui[CLEAR_X1] );
974 OUT_RING( depth_boxes[i].ui[CLEAR_Y2] );
975 OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
978 OUT_RING( depth_boxes[i].ui[CLEAR_X2] );
979 OUT_RING( depth_boxes[i].ui[CLEAR_Y2] );
980 OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
987 /* Increment the clear counter. The client-side 3D driver must
988 * wait on this value before performing the clear ioctl. We
989 * need this because the card's so damned fast...
991 dev_priv->sarea_priv->last_clear++;
995 RADEON_CLEAR_AGE( dev_priv->sarea_priv->last_clear );
996 RADEON_WAIT_UNTIL_IDLE();
1001 static void radeon_cp_dispatch_swap( drm_device_t *dev )
1003 drm_radeon_private_t *dev_priv = dev->dev_private;
1004 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
1005 int nbox = sarea_priv->nbox;
1006 drm_clip_rect_t *pbox = sarea_priv->boxes;
1011 /* Do some trivial performance monitoring...
1013 if (dev_priv->do_boxes)
1014 radeon_cp_performance_boxes( dev_priv );
1017 /* Wait for the 3D stream to idle before dispatching the bitblt.
1018 * This will prevent data corruption between the two streams.
1022 RADEON_WAIT_UNTIL_3D_IDLE();
1026 for ( i = 0 ; i < nbox ; i++ ) {
1029 int w = pbox[i].x2 - x;
1030 int h = pbox[i].y2 - y;
1032 DRM_DEBUG( "dispatch swap %d,%d-%d,%d\n",
1037 OUT_RING( CP_PACKET3( RADEON_CNTL_BITBLT_MULTI, 5 ) );
1038 OUT_RING( RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
1039 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
1040 RADEON_GMC_BRUSH_NONE |
1041 (dev_priv->color_fmt << 8) |
1042 RADEON_GMC_SRC_DATATYPE_COLOR |
1044 RADEON_DP_SRC_SOURCE_MEMORY |
1045 RADEON_GMC_CLR_CMP_CNTL_DIS |
1046 RADEON_GMC_WR_MSK_DIS );
1048 /* Make this work even if front & back are flipped:
1050 if (dev_priv->current_page == 0) {
1051 OUT_RING( dev_priv->back_pitch_offset );
1052 OUT_RING( dev_priv->front_pitch_offset );
1055 OUT_RING( dev_priv->front_pitch_offset );
1056 OUT_RING( dev_priv->back_pitch_offset );
1059 OUT_RING( (x << 16) | y );
1060 OUT_RING( (x << 16) | y );
1061 OUT_RING( (w << 16) | h );
1066 /* Increment the frame counter. The client-side 3D driver must
1067 * throttle the framerate by waiting for this value before
1068 * performing the swapbuffer ioctl.
1070 dev_priv->sarea_priv->last_frame++;
1074 RADEON_FRAME_AGE( dev_priv->sarea_priv->last_frame );
1075 RADEON_WAIT_UNTIL_2D_IDLE();
1080 static void radeon_cp_dispatch_flip( drm_device_t *dev )
1082 drm_radeon_private_t *dev_priv = dev->dev_private;
1083 drm_sarea_t *sarea = (drm_sarea_t *)dev_priv->sarea->handle;
1084 int offset = (dev_priv->current_page == 1)
1085 ? dev_priv->front_offset : dev_priv->back_offset;
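/* Pick the buffer that is about to become visible: the front offset if
 * we are currently on page 1, the back offset otherwise.
 */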
1087 DRM_DEBUG( "%s: page=%d pfCurrentPage=%d\n",
1089 dev_priv->current_page,
1090 dev_priv->sarea_priv->pfCurrentPage);
1092 /* Do some trivial performance monitoring...
1094 if (dev_priv->do_boxes) {
1095 dev_priv->stats.boxes |= RADEON_BOX_FLIP;
1096 radeon_cp_performance_boxes( dev_priv );
1099 /* Update the frame offsets for both CRTCs
1103 RADEON_WAIT_UNTIL_3D_IDLE();
1104 OUT_RING_REG( RADEON_CRTC_OFFSET, ( ( sarea->frame.y * dev_priv->front_pitch
1106 * ( dev_priv->color_fmt - 2 ) ) & ~7 )
1108 OUT_RING_REG( RADEON_CRTC2_OFFSET, dev_priv->sarea_priv->crtc2_base
1113 /* Increment the frame counter. The client-side 3D driver must
1114 * throttle the framerate by waiting for this value before
1115 * performing the swapbuffer ioctl.
1117 dev_priv->sarea_priv->last_frame++;
1118 dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
1119 1 - dev_priv->current_page;
1123 RADEON_FRAME_AGE( dev_priv->sarea_priv->last_frame );
1128 static int bad_prim_vertex_nr( int primitive, int nr )
1130 switch (primitive & RADEON_PRIM_TYPE_MASK) {
1131 case RADEON_PRIM_TYPE_NONE:
1132 case RADEON_PRIM_TYPE_POINT:
1134 case RADEON_PRIM_TYPE_LINE:
1135 return (nr & 1) || nr == 0;
1136 case RADEON_PRIM_TYPE_LINE_STRIP:
1138 case RADEON_PRIM_TYPE_TRI_LIST:
1139 case RADEON_PRIM_TYPE_3VRT_POINT_LIST:
1140 case RADEON_PRIM_TYPE_3VRT_LINE_LIST:
1141 case RADEON_PRIM_TYPE_RECT_LIST:
1142 return nr % 3 || nr == 0;
1143 case RADEON_PRIM_TYPE_TRI_FAN:
1144 case RADEON_PRIM_TYPE_TRI_STRIP:
1155 unsigned int finish;
1157 unsigned int numverts;
1158 unsigned int offset;
1159 unsigned int vc_format;
1160 } drm_radeon_tcl_prim_t;
1162 static void radeon_cp_dispatch_vertex( drm_device_t *dev,
1164 drm_radeon_tcl_prim_t *prim )
1167 drm_radeon_private_t *dev_priv = dev->dev_private;
1168 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
1169 int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start;
1170 int numverts = (int)prim->numverts;
1171 int nbox = sarea_priv->nbox;
1175 DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n",
1182 if (bad_prim_vertex_nr( prim->prim, prim->numverts )) {
1183 DRM_ERROR( "bad prim %x numverts %d\n",
1184 prim->prim, prim->numverts );
1189 /* Emit the next cliprect */
1191 radeon_emit_clip_rect( dev_priv,
1192 &sarea_priv->boxes[i] );
1195 /* Emit the vertex buffer rendering commands */
1198 OUT_RING( CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, 3 ) );
1200 OUT_RING( numverts );
1201 OUT_RING( prim->vc_format );
1202 OUT_RING( prim->prim | RADEON_PRIM_WALK_LIST |
1203 RADEON_COLOR_ORDER_RGBA |
1204 RADEON_VTX_FMT_RADEON_MODE |
1205 (numverts << RADEON_NUM_VERTICES_SHIFT) );
1210 } while ( i < nbox );
1215 static void radeon_cp_discard_buffer( drm_device_t *dev, drm_buf_t *buf )
1217 drm_radeon_private_t *dev_priv = dev->dev_private;
1218 drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
1221 buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;
1223 /* Emit the vertex buffer age */
1225 RADEON_DISPATCH_AGE( buf_priv->age );
1232 static void radeon_cp_dispatch_indirect( drm_device_t *dev,
1234 int start, int end )
1236 drm_radeon_private_t *dev_priv = dev->dev_private;
1238 DRM_DEBUG( "indirect: buf=%d s=0x%x e=0x%x\n",
1239 buf->idx, start, end );
1241 if ( start != end ) {
1242 int offset = (dev_priv->gart_buffers_offset
1243 + buf->offset + start);
1244 int dwords = (end - start + 3) / sizeof(u32);
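/* Round the byte range up to a whole number of dwords. */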
1246 /* Indirect buffer data must be an even number of
1247 * dwords, so if we've been given an odd number we must
1248 * pad the data with a Type-2 CP packet.
1252 ((char *)dev->agp_buffer_map->handle
1253 + buf->offset + start);
1254 data[dwords++] = RADEON_CP_PACKET2;
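/* RADEON_CP_PACKET2 is a type-2 no-op, used here purely as padding. */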
1257 /* Fire off the indirect buffer */
1260 OUT_RING( CP_PACKET0( RADEON_CP_IB_BASE, 1 ) );
1269 static void radeon_cp_dispatch_indices( drm_device_t *dev,
1271 drm_radeon_tcl_prim_t *prim )
1273 drm_radeon_private_t *dev_priv = dev->dev_private;
1274 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
1275 int offset = dev_priv->gart_buffers_offset + prim->offset;
1279 int start = prim->start + RADEON_INDEX_PRIM_OFFSET;
1280 int count = (prim->finish - start) / sizeof(u16);
1281 int nbox = sarea_priv->nbox;
1283 DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n",
1291 if (bad_prim_vertex_nr( prim->prim, count )) {
1292 DRM_ERROR( "bad prim %x count %d\n",
1293 prim->prim, count );
1298 if ( start >= prim->finish ||
1299 (prim->start & 0x7) ) {
1300 DRM_ERROR( "buffer prim %d\n", prim->prim );
1304 dwords = (prim->finish - prim->start + 3) / sizeof(u32);
1306 data = (u32 *)((char *)dev->agp_buffer_map->handle +
1307 elt_buf->offset + prim->start);
1309 data[0] = CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, dwords-2 );
1311 data[2] = prim->numverts;
1312 data[3] = prim->vc_format;
1313 data[4] = (prim->prim |
1314 RADEON_PRIM_WALK_IND |
1315 RADEON_COLOR_ORDER_RGBA |
1316 RADEON_VTX_FMT_RADEON_MODE |
1317 (count << RADEON_NUM_VERTICES_SHIFT) );
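/* The draw packet is built in place in the element buffer and then
 * replayed once per cliprect via radeon_cp_dispatch_indirect() below.
 */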
1321 radeon_emit_clip_rect( dev_priv,
1322 &sarea_priv->boxes[i] );
1324 radeon_cp_dispatch_indirect( dev, elt_buf,
1329 } while ( i < nbox );
1333 #define RADEON_MAX_TEXTURE_SIZE (RADEON_BUFFER_SIZE - 8 * sizeof(u32))
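/* One DMA buffer has to hold the CNTL_HOSTDATA_BLT header as well as the
 * texture data for a single pass, hence the eight reserved dwords.
 */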
1335 static int radeon_cp_dispatch_texture( DRMFILE filp,
1337 drm_radeon_texture_t *tex,
1338 drm_radeon_tex_image_t *image )
1340 drm_radeon_private_t *dev_priv = dev->dev_private;
1341 drm_file_t *filp_priv;
1345 const u8 __user *data;
1346 int size, dwords, tex_width, blit_width;
1351 DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
1353 if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &tex->offset ) ) {
1354 DRM_ERROR( "Invalid destination offset\n" );
1355 return DRM_ERR( EINVAL );
1358 dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;
1360 /* Flush the pixel cache. This ensures no pixel data gets mixed
1361 * up with the texture data from the host data blit, otherwise
1362 * part of the texture image may be corrupted.
1365 RADEON_FLUSH_CACHE();
1366 RADEON_WAIT_UNTIL_IDLE();
1370 /* The Mesa texture functions provide the data in little endian as the
1371 * chip wants it, but we need to compensate for the fact that the CP
1372 * ring gets byte-swapped
1375 OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT );
1380 /* The compiler won't optimize away a division by a variable,
1381 * even if the only legal values are powers of two. Thus, we'll
1382 * use a shift instead.
1384 switch ( tex->format ) {
1385 case RADEON_TXFORMAT_ARGB8888:
1386 case RADEON_TXFORMAT_RGBA8888:
1387 format = RADEON_COLOR_FORMAT_ARGB8888;
1388 tex_width = tex->width * 4;
1389 blit_width = image->width * 4;
1391 case RADEON_TXFORMAT_AI88:
1392 case RADEON_TXFORMAT_ARGB1555:
1393 case RADEON_TXFORMAT_RGB565:
1394 case RADEON_TXFORMAT_ARGB4444:
1395 case RADEON_TXFORMAT_VYUY422:
1396 case RADEON_TXFORMAT_YVYU422:
1397 format = RADEON_COLOR_FORMAT_RGB565;
1398 tex_width = tex->width * 2;
1399 blit_width = image->width * 2;
1401 case RADEON_TXFORMAT_I8:
1402 case RADEON_TXFORMAT_RGB332:
1403 format = RADEON_COLOR_FORMAT_CI8;
1404 tex_width = tex->width * 1;
1405 blit_width = image->width * 1;
1408 DRM_ERROR( "invalid texture format %d\n", tex->format );
1409 return DRM_ERR(EINVAL);
1412 DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width );
1415 DRM_DEBUG( "tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
1416 tex->offset >> 10, tex->pitch, tex->format,
1417 image->x, image->y, image->width, image->height );
1419 /* Make a copy of some parameters in case we have to
1420 * update them for a multi-pass texture blit.
1422 height = image->height;
1423 data = (const u8 __user *)image->data;
1425 size = height * blit_width;
1427 if ( size > RADEON_MAX_TEXTURE_SIZE ) {
1428 height = RADEON_MAX_TEXTURE_SIZE / blit_width;
1429 size = height * blit_width;
1430 } else if ( size < 4 && size > 0 ) {
1432 } else if ( size == 0 ) {
1436 buf = radeon_freelist_get( dev );
1438 radeon_do_cp_idle( dev_priv );
1439 buf = radeon_freelist_get( dev );
1442 DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n");
1443 DRM_COPY_TO_USER( tex->image, image, sizeof(*image) );
1444 return DRM_ERR(EAGAIN);
1448 /* Dispatch the indirect buffer.
1450 buffer = (u32*)((char*)dev->agp_buffer_map->handle + buf->offset);
1452 buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
1453 buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
1454 RADEON_GMC_BRUSH_NONE |
1456 RADEON_GMC_SRC_DATATYPE_COLOR |
1458 RADEON_DP_SRC_SOURCE_HOST_DATA |
1459 RADEON_GMC_CLR_CMP_CNTL_DIS |
1460 RADEON_GMC_WR_MSK_DIS);
1462 buffer[2] = (tex->pitch << 22) | (tex->offset >> 10);
1463 buffer[3] = 0xffffffff;
1464 buffer[4] = 0xffffffff;
1465 buffer[5] = (image->y << 16) | image->x;
1466 buffer[6] = (height << 16) | image->width;
1470 if ( tex_width >= 32 ) {
1471 /* Texture image width is larger than the minimum, so we
1472 * can upload it directly.
1474 if ( DRM_COPY_FROM_USER( buffer, data,
1475 dwords * sizeof(u32) ) ) {
1476 DRM_ERROR( "EFAULT on data, %d dwords\n",
1478 return DRM_ERR(EFAULT);
1481 /* Texture image width is less than the minimum, so we
1482 * need to pad out each image scanline to the minimum
1485 for ( i = 0 ; i < tex->height ; i++ ) {
1486 if ( DRM_COPY_FROM_USER( buffer, data,
1488 DRM_ERROR( "EFAULT on pad, %d bytes\n",
1490 return DRM_ERR(EFAULT);
1498 buf->used = (dwords + 8) * sizeof(u32);
1499 radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
1500 radeon_cp_discard_buffer( dev, buf );
1502 /* Update the input parameters for next time */
1504 image->height -= height;
1505 image->data = (const u8 __user *)image->data + size;
1506 } while (image->height > 0);
1508 /* Flush the pixel cache after the blit completes. This ensures
1509 * the texture data is written out to memory before rendering
1513 RADEON_FLUSH_CACHE();
1514 RADEON_WAIT_UNTIL_2D_IDLE();
1520 static void radeon_cp_dispatch_stipple( drm_device_t *dev, u32 *stipple )
1522 drm_radeon_private_t *dev_priv = dev->dev_private;
1529 OUT_RING( CP_PACKET0( RADEON_RE_STIPPLE_ADDR, 0 ) );
1530 OUT_RING( 0x00000000 );
1532 OUT_RING( CP_PACKET0_TABLE( RADEON_RE_STIPPLE_DATA, 31 ) );
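/* CP_PACKET0_TABLE sets the one-register-write bit, so all 32 stipple
 * dwords land in RADEON_RE_STIPPLE_DATA rather than consecutive registers.
 */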
1533 for ( i = 0 ; i < 32 ; i++ ) {
1534 OUT_RING( stipple[i] );
1541 /* ================================================================
1545 int radeon_cp_clear( DRM_IOCTL_ARGS )
1548 drm_radeon_private_t *dev_priv = dev->dev_private;
1549 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
1550 drm_radeon_clear_t clear;
1551 drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
1554 LOCK_TEST_WITH_RETURN( dev, filp );
1556 DRM_COPY_FROM_USER_IOCTL( clear, (drm_radeon_clear_t __user *)data,
1559 RING_SPACE_TEST_WITH_RETURN( dev_priv );
1561 if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS )
1562 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
1564 if ( DRM_COPY_FROM_USER( &depth_boxes, clear.depth_boxes,
1565 sarea_priv->nbox * sizeof(depth_boxes[0]) ) )
1566 return DRM_ERR(EFAULT);
1568 radeon_cp_dispatch_clear( dev, &clear, depth_boxes );
1575 /* Not sure why this isn't set all the time:
1577 static int radeon_do_init_pageflip( drm_device_t *dev )
1579 drm_radeon_private_t *dev_priv = dev->dev_private;
1585 RADEON_WAIT_UNTIL_3D_IDLE();
1586 OUT_RING( CP_PACKET0( RADEON_CRTC_OFFSET_CNTL, 0 ) );
1587 OUT_RING( RADEON_READ( RADEON_CRTC_OFFSET_CNTL ) | RADEON_CRTC_OFFSET_FLIP_CNTL );
1588 OUT_RING( CP_PACKET0( RADEON_CRTC2_OFFSET_CNTL, 0 ) );
1589 OUT_RING( RADEON_READ( RADEON_CRTC2_OFFSET_CNTL ) | RADEON_CRTC_OFFSET_FLIP_CNTL );
1592 dev_priv->page_flipping = 1;
1593 dev_priv->current_page = 0;
1594 dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;
1599 /* Called whenever a client dies, from DRM(release).
1600 * NOTE: Lock isn't necessarily held when this is called!
1602 int radeon_do_cleanup_pageflip( drm_device_t *dev )
1604 drm_radeon_private_t *dev_priv = dev->dev_private;
1607 if (dev_priv->current_page != 0)
1608 radeon_cp_dispatch_flip( dev );
1610 dev_priv->page_flipping = 0;
1614 /* Swapping and flipping are different operations, need different ioctls.
1615 * They can & should be intermixed to support multiple 3d windows.
1617 int radeon_cp_flip( DRM_IOCTL_ARGS )
1620 drm_radeon_private_t *dev_priv = dev->dev_private;
1623 LOCK_TEST_WITH_RETURN( dev, filp );
1625 RING_SPACE_TEST_WITH_RETURN( dev_priv );
1627 if (!dev_priv->page_flipping)
1628 radeon_do_init_pageflip( dev );
1630 radeon_cp_dispatch_flip( dev );
1636 int radeon_cp_swap( DRM_IOCTL_ARGS )
1639 drm_radeon_private_t *dev_priv = dev->dev_private;
1640 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
1643 LOCK_TEST_WITH_RETURN( dev, filp );
1645 RING_SPACE_TEST_WITH_RETURN( dev_priv );
1647 if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS )
1648 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
1650 radeon_cp_dispatch_swap( dev );
1651 dev_priv->sarea_priv->ctx_owner = 0;
1657 int radeon_cp_vertex( DRM_IOCTL_ARGS )
1660 drm_radeon_private_t *dev_priv = dev->dev_private;
1661 drm_file_t *filp_priv;
1662 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
1663 drm_device_dma_t *dma = dev->dma;
1665 drm_radeon_vertex_t vertex;
1666 drm_radeon_tcl_prim_t prim;
1668 LOCK_TEST_WITH_RETURN( dev, filp );
1670 DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
1672 DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex_t __user *)data,
1675 DRM_DEBUG( "pid=%d index=%d count=%d discard=%d\n",
1677 vertex.idx, vertex.count, vertex.discard );
1679 if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) {
1680 DRM_ERROR( "buffer index %d (of %d max)\n",
1681 vertex.idx, dma->buf_count - 1 );
1682 return DRM_ERR(EINVAL);
1684 if ( vertex.prim < 0 ||
1685 vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST ) {
1686 DRM_ERROR( "buffer prim %d\n", vertex.prim );
1687 return DRM_ERR(EINVAL);
1690 RING_SPACE_TEST_WITH_RETURN( dev_priv );
1691 VB_AGE_TEST_WITH_RETURN( dev_priv );
1693 buf = dma->buflist[vertex.idx];
1695 if ( buf->filp != filp ) {
1696 DRM_ERROR( "process %d using buffer owned by %p\n",
1697 DRM_CURRENTPID, buf->filp );
1698 return DRM_ERR(EINVAL);
1700 if ( buf->pending ) {
1701 DRM_ERROR( "sending pending buffer %d\n", vertex.idx );
1702 return DRM_ERR(EINVAL);
1705 /* Build up a prim_t record:
1708 buf->used = vertex.count; /* not used? */
1710 if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) {
1711 if ( radeon_emit_state( dev_priv, filp_priv,
1712 &sarea_priv->context_state,
1713 sarea_priv->tex_state,
1714 sarea_priv->dirty ) ) {
1715 DRM_ERROR( "radeon_emit_state failed\n" );
1716 return DRM_ERR( EINVAL );
1719 sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
1720 RADEON_UPLOAD_TEX1IMAGES |
1721 RADEON_UPLOAD_TEX2IMAGES |
1722 RADEON_REQUIRE_QUIESCENCE);
1726 prim.finish = vertex.count; /* unused */
1727 prim.prim = vertex.prim;
1728 prim.numverts = vertex.count;
1729 prim.vc_format = dev_priv->sarea_priv->vc_format;
1731 radeon_cp_dispatch_vertex( dev, buf, &prim );
1734 if (vertex.discard) {
1735 radeon_cp_discard_buffer( dev, buf );
1742 int radeon_cp_indices( DRM_IOCTL_ARGS )
1745 drm_radeon_private_t *dev_priv = dev->dev_private;
1746 drm_file_t *filp_priv;
1747 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
1748 drm_device_dma_t *dma = dev->dma;
1750 drm_radeon_indices_t elts;
1751 drm_radeon_tcl_prim_t prim;
1754 LOCK_TEST_WITH_RETURN( dev, filp );
1757 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
1758 return DRM_ERR(EINVAL);
1761 DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
1763 DRM_COPY_FROM_USER_IOCTL( elts, (drm_radeon_indices_t __user *)data,
1766 DRM_DEBUG( "pid=%d index=%d start=%d end=%d discard=%d\n",
1768 elts.idx, elts.start, elts.end, elts.discard );
1770 if ( elts.idx < 0 || elts.idx >= dma->buf_count ) {
1771 DRM_ERROR( "buffer index %d (of %d max)\n",
1772 elts.idx, dma->buf_count - 1 );
1773 return DRM_ERR(EINVAL);
1775 if ( elts.prim < 0 ||
1776 elts.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST ) {
1777 DRM_ERROR( "buffer prim %d\n", elts.prim );
1778 return DRM_ERR(EINVAL);
1781 RING_SPACE_TEST_WITH_RETURN( dev_priv );
1782 VB_AGE_TEST_WITH_RETURN( dev_priv );
1784 buf = dma->buflist[elts.idx];
1786 if ( buf->filp != filp ) {
1787 DRM_ERROR( "process %d using buffer owned by %p\n",
1788 DRM_CURRENTPID, buf->filp );
1789 return DRM_ERR(EINVAL);
1791 if ( buf->pending ) {
1792 DRM_ERROR( "sending pending buffer %d\n", elts.idx );
1793 return DRM_ERR(EINVAL);
1796 count = (elts.end - elts.start) / sizeof(u16);
1797 elts.start -= RADEON_INDEX_PRIM_OFFSET;
1799 if ( elts.start & 0x7 ) {
1800 DRM_ERROR( "misaligned buffer 0x%x\n", elts.start );
1801 return DRM_ERR(EINVAL);
1803 if ( elts.start < buf->used ) {
1804 DRM_ERROR( "no header 0x%x - 0x%x\n", elts.start, buf->used );
1805 return DRM_ERR(EINVAL);
1808 buf->used = elts.end;
1810 if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) {
1811 if ( radeon_emit_state( dev_priv, filp_priv,
1812 &sarea_priv->context_state,
1813 sarea_priv->tex_state,
1814 sarea_priv->dirty ) ) {
1815 DRM_ERROR( "radeon_emit_state failed\n" );
1816 return DRM_ERR( EINVAL );
1819 sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
1820 RADEON_UPLOAD_TEX1IMAGES |
1821 RADEON_UPLOAD_TEX2IMAGES |
1822 RADEON_REQUIRE_QUIESCENCE);
1826 /* Build up a prim_t record:
1828 prim.start = elts.start;
1829 prim.finish = elts.end;
1830 prim.prim = elts.prim;
1831 prim.offset = 0; /* offset from start of dma buffers */
1832 prim.numverts = RADEON_MAX_VB_VERTS; /* duh */
1833 prim.vc_format = dev_priv->sarea_priv->vc_format;
1835 radeon_cp_dispatch_indices( dev, buf, &prim );
1837 radeon_cp_discard_buffer( dev, buf );
1844 int radeon_cp_texture( DRM_IOCTL_ARGS )
1847 drm_radeon_private_t *dev_priv = dev->dev_private;
1848 drm_radeon_texture_t tex;
1849 drm_radeon_tex_image_t image;
1852 LOCK_TEST_WITH_RETURN( dev, filp );
1854 DRM_COPY_FROM_USER_IOCTL( tex, (drm_radeon_texture_t __user *)data, sizeof(tex) );
1856 if ( tex.image == NULL ) {
1857 DRM_ERROR( "null texture image!\n" );
1858 return DRM_ERR(EINVAL);
1861 if ( DRM_COPY_FROM_USER( &image,
1862 (drm_radeon_tex_image_t __user *)tex.image,
1864 return DRM_ERR(EFAULT);
1866 RING_SPACE_TEST_WITH_RETURN( dev_priv );
1867 VB_AGE_TEST_WITH_RETURN( dev_priv );
1869 ret = radeon_cp_dispatch_texture( filp, dev, &tex, &image );
1875 int radeon_cp_stipple( DRM_IOCTL_ARGS )
1878 drm_radeon_private_t *dev_priv = dev->dev_private;
1879 drm_radeon_stipple_t stipple;
1882 LOCK_TEST_WITH_RETURN( dev, filp );
1884 DRM_COPY_FROM_USER_IOCTL( stipple, (drm_radeon_stipple_t __user *)data,
1887 if ( DRM_COPY_FROM_USER( &mask, stipple.mask, 32 * sizeof(u32) ) )
1888 return DRM_ERR(EFAULT);
1890 RING_SPACE_TEST_WITH_RETURN( dev_priv );
1892 radeon_cp_dispatch_stipple( dev, mask );
1898 int radeon_cp_indirect( DRM_IOCTL_ARGS )
1901 drm_radeon_private_t *dev_priv = dev->dev_private;
1902 drm_device_dma_t *dma = dev->dma;
1904 drm_radeon_indirect_t indirect;
1907 LOCK_TEST_WITH_RETURN( dev, filp );
1910 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
1911 return DRM_ERR(EINVAL);
1914 DRM_COPY_FROM_USER_IOCTL( indirect, (drm_radeon_indirect_t __user *)data,
1917 DRM_DEBUG( "indirect: idx=%d s=%d e=%d d=%d\n",
1918 indirect.idx, indirect.start,
1919 indirect.end, indirect.discard );
1921 if ( indirect.idx < 0 || indirect.idx >= dma->buf_count ) {
1922 DRM_ERROR( "buffer index %d (of %d max)\n",
1923 indirect.idx, dma->buf_count - 1 );
1924 return DRM_ERR(EINVAL);
1927 buf = dma->buflist[indirect.idx];
1929 if ( buf->filp != filp ) {
1930 DRM_ERROR( "process %d using buffer owned by %p\n",
1931 DRM_CURRENTPID, buf->filp );
1932 return DRM_ERR(EINVAL);
1934 if ( buf->pending ) {
1935 DRM_ERROR( "sending pending buffer %d\n", indirect.idx );
1936 return DRM_ERR(EINVAL);
1939 if ( indirect.start < buf->used ) {
1940 DRM_ERROR( "reusing indirect: start=0x%x actual=0x%x\n",
1941 indirect.start, buf->used );
1942 return DRM_ERR(EINVAL);
1945 RING_SPACE_TEST_WITH_RETURN( dev_priv );
1946 VB_AGE_TEST_WITH_RETURN( dev_priv );
1948 buf->used = indirect.end;
1950 /* Wait for the 3D stream to idle before the indirect buffer
1951 * containing 2D acceleration commands is processed.
1955 RADEON_WAIT_UNTIL_3D_IDLE();
1959 /* Dispatch the indirect buffer full of commands from the
1960 * X server. This is insecure and is thus only available to
1961 * privileged clients.
1963 radeon_cp_dispatch_indirect( dev, buf, indirect.start, indirect.end );
1964 if (indirect.discard) {
1965 radeon_cp_discard_buffer( dev, buf );
1973 int radeon_cp_vertex2( DRM_IOCTL_ARGS )
1976 drm_radeon_private_t *dev_priv = dev->dev_private;
1977 drm_file_t *filp_priv;
1978 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
1979 drm_device_dma_t *dma = dev->dma;
1981 drm_radeon_vertex2_t vertex;
1983 unsigned char laststate;
1985 LOCK_TEST_WITH_RETURN( dev, filp );
1988 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
1989 return DRM_ERR(EINVAL);
1992 DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
1994 DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex2_t __user *)data,
1997 DRM_DEBUG( "pid=%d index=%d discard=%d\n",
1999 vertex.idx, vertex.discard );
2001 if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) {
2002 DRM_ERROR( "buffer index %d (of %d max)\n",
2003 vertex.idx, dma->buf_count - 1 );
2004 return DRM_ERR(EINVAL);
2007 RING_SPACE_TEST_WITH_RETURN( dev_priv );
2008 VB_AGE_TEST_WITH_RETURN( dev_priv );
2010 buf = dma->buflist[vertex.idx];
2012 if ( buf->filp != filp ) {
2013 DRM_ERROR( "process %d using buffer owned by %p\n",
2014 DRM_CURRENTPID, buf->filp );
2015 return DRM_ERR(EINVAL);
2018 if ( buf->pending ) {
2019 DRM_ERROR( "sending pending buffer %d\n", vertex.idx );
2020 return DRM_ERR(EINVAL);
2023 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
2024 return DRM_ERR(EINVAL);
2026 for (laststate = 0xff, i = 0 ; i < vertex.nr_prims ; i++) {
2027 drm_radeon_prim_t prim;
2028 drm_radeon_tcl_prim_t tclprim;
2030 if ( DRM_COPY_FROM_USER( &prim, &vertex.prim[i], sizeof(prim) ) )
2031 return DRM_ERR(EFAULT);
2033 if ( prim.stateidx != laststate ) {
2034 drm_radeon_state_t state;
2036 if ( DRM_COPY_FROM_USER( &state,
2037 &vertex.state[prim.stateidx],
2039 return DRM_ERR(EFAULT);
2041 if ( radeon_emit_state2( dev_priv, filp_priv, &state ) ) {
2042 DRM_ERROR( "radeon_emit_state2 failed\n" );
2043 return DRM_ERR( EINVAL );
2046 laststate = prim.stateidx;
2049 tclprim.start = prim.start;
2050 tclprim.finish = prim.finish;
2051 tclprim.prim = prim.prim;
2052 tclprim.vc_format = prim.vc_format;
2054 if ( prim.prim & RADEON_PRIM_WALK_IND ) {
2055 tclprim.offset = prim.numverts * 64;
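/* Assumes the element list sits after the vertex data, at what appears
 * to be 64 bytes per vertex in the client's buffer layout.
 */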
2056 tclprim.numverts = RADEON_MAX_VB_VERTS; /* duh */
2058 radeon_cp_dispatch_indices( dev, buf, &tclprim );
2060 tclprim.numverts = prim.numverts;
2061 tclprim.offset = 0; /* not used */
2063 radeon_cp_dispatch_vertex( dev, buf, &tclprim );
2066 if (sarea_priv->nbox == 1)
2067 sarea_priv->nbox = 0;
2070 if ( vertex.discard ) {
2071 radeon_cp_discard_buffer( dev, buf );
2079 static int radeon_emit_packets(
2080 drm_radeon_private_t *dev_priv,
2081 drm_file_t *filp_priv,
2082 drm_radeon_cmd_header_t header,
2083 drm_radeon_cmd_buffer_t *cmdbuf )
2085 int id = (int)header.packet.packet_id;
2087 int __user *data = (int __user *)cmdbuf->buf;
2090 if (id >= RADEON_MAX_STATE_PACKETS)
2091 return DRM_ERR(EINVAL);
2093 sz = packet[id].len;
2094 reg = packet[id].start;
2096 if (sz * sizeof(int) > cmdbuf->bufsz) {
2097 DRM_ERROR( "Packet size provided larger than data provided\n" );
2098 return DRM_ERR(EINVAL);
2101 if ( radeon_check_and_fixup_packets( dev_priv, filp_priv, id, data ) ) {
2102 DRM_ERROR( "Packet verification failed\n" );
2103 return DRM_ERR( EINVAL );
2107 OUT_RING( CP_PACKET0( reg, (sz-1) ) );
2108 OUT_RING_USER_TABLE( data, sz );
2111 cmdbuf->buf += sz * sizeof(int);
2112 cmdbuf->bufsz -= sz * sizeof(int);
2116 static __inline__ int radeon_emit_scalars(
2117 drm_radeon_private_t *dev_priv,
2118 drm_radeon_cmd_header_t header,
2119 drm_radeon_cmd_buffer_t *cmdbuf )
2121 int sz = header.scalars.count;
2122 int __user *data = (int __user *)cmdbuf->buf;
2123 int start = header.scalars.offset;
2124 int stride = header.scalars.stride;
2128 OUT_RING( CP_PACKET0( RADEON_SE_TCL_SCALAR_INDX_REG, 0 ) );
2129 OUT_RING( start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
2130 OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_SCALAR_DATA_REG, sz-1 ) );
2131 OUT_RING_USER_TABLE( data, sz );
2133 cmdbuf->buf += sz * sizeof(int);
2134 cmdbuf->bufsz -= sz * sizeof(int);
2140 static __inline__ int radeon_emit_scalars2(
2141 drm_radeon_private_t *dev_priv,
2142 drm_radeon_cmd_header_t header,
2143 drm_radeon_cmd_buffer_t *cmdbuf )
2145 int sz = header.scalars.count;
2146 int __user *data = (int __user *)cmdbuf->buf;
2147 int start = ((unsigned int)header.scalars.offset) + 0x100;
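/* The command header's scalar offset field is too narrow to reach the
 * whole range, so this variant adds 0x100 for the upper bank.
 */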
2148 int stride = header.scalars.stride;
2152 OUT_RING( CP_PACKET0( RADEON_SE_TCL_SCALAR_INDX_REG, 0 ) );
2153 OUT_RING( start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
2154 OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_SCALAR_DATA_REG, sz-1 ) );
2155 OUT_RING_USER_TABLE( data, sz );
2157 cmdbuf->buf += sz * sizeof(int);
2158 cmdbuf->bufsz -= sz * sizeof(int);
2162 static __inline__ int radeon_emit_vectors(
2163 drm_radeon_private_t *dev_priv,
2164 drm_radeon_cmd_header_t header,
2165 drm_radeon_cmd_buffer_t *cmdbuf )
2167 int sz = header.vectors.count;
2168 int __user *data = (int __user *)cmdbuf->buf;
2169 int start = header.vectors.offset;
2170 int stride = header.vectors.stride;
2174 OUT_RING( CP_PACKET0( RADEON_SE_TCL_VECTOR_INDX_REG, 0 ) );
2175 OUT_RING( start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
2176 OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_VECTOR_DATA_REG, (sz-1) ) );
2177 OUT_RING_USER_TABLE( data, sz );
2180 cmdbuf->buf += sz * sizeof(int);
2181 cmdbuf->bufsz -= sz * sizeof(int);
2186 static int radeon_emit_packet3( drm_device_t *dev,
2187 drm_file_t *filp_priv,
2188 drm_radeon_cmd_buffer_t *cmdbuf )
2190 drm_radeon_private_t *dev_priv = dev->dev_private;
2192 int __user *cmd = (int __user *)cmdbuf->buf;
2198 if ( ( ret = radeon_check_and_fixup_packet3( dev_priv, filp_priv,
2199 cmdbuf, &cmdsz ) ) ) {
2200 DRM_ERROR( "Packet verification failed\n" );
2204 BEGIN_RING( cmdsz );
2205 OUT_RING_USER_TABLE( cmd, cmdsz );
2208 cmdbuf->buf += cmdsz * 4;
2209 cmdbuf->bufsz -= cmdsz * 4;
static int radeon_emit_packet3_cliprect( drm_device_t *dev,
					 drm_file_t *filp_priv,
					 drm_radeon_cmd_buffer_t *cmdbuf,
					 int orig_nbox )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t box;
	unsigned int cmdsz;
	int ret;
	int __user *cmd = (int __user *)cmdbuf->buf;
	drm_clip_rect_t __user *boxes = cmdbuf->boxes;
	int i = 0;
	RING_LOCALS;

	if ( ( ret = radeon_check_and_fixup_packet3( dev_priv, filp_priv,
						     cmdbuf, &cmdsz ) ) ) {
		DRM_ERROR( "Packet verification failed\n" );
		return ret;
	}

	if (!orig_nbox)
		goto out;

	do {
		if ( i < cmdbuf->nbox ) {
			if (DRM_COPY_FROM_USER_UNCHECKED( &box, &boxes[i], sizeof(box) ))
				return DRM_ERR(EFAULT);
			/* FIXME The second and subsequent times round
			 * this loop, send a WAIT_UNTIL_3D_IDLE before
			 * calling emit_clip_rect().  This fixes a
			 * lockup on fast machines when sending several
			 * cliprects with a cmdbuf, as when waving a 2D
			 * window over a 3D window.  Something in the
			 * commands from user space seems to hang the
			 * card when they're sent several times in a
			 * row.  That would be the correct place to fix
			 * it, but this works around it until I can
			 * figure that out - Tim Smith */
			if ( i ) {
				BEGIN_RING( 2 );
				RADEON_WAIT_UNTIL_3D_IDLE();
				ADVANCE_RING();
			}
			radeon_emit_clip_rect( dev_priv, &box );
		}

		BEGIN_RING( cmdsz );
		OUT_RING_USER_TABLE( cmd, cmdsz );
		ADVANCE_RING();
	} while ( ++i < cmdbuf->nbox );

	if (cmdbuf->nbox == 1)
		cmdbuf->nbox = 0;

 out:
	cmdbuf->buf += cmdsz * 4;
	cmdbuf->bufsz -= cmdsz * 4;
	return 0;
}
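
/* Emit an engine-idle wait on the client's behalf: 2D, 3D, or both. */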
static int radeon_emit_wait( drm_device_t *dev, int flags )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG("%s: %x\n", __FUNCTION__, flags);
	switch (flags) {
	case RADEON_WAIT_2D:
		BEGIN_RING( 2 );
		RADEON_WAIT_UNTIL_2D_IDLE();
		ADVANCE_RING();
		break;
	case RADEON_WAIT_3D:
		BEGIN_RING( 2 );
		RADEON_WAIT_UNTIL_3D_IDLE();
		ADVANCE_RING();
		break;
	case RADEON_WAIT_2D|RADEON_WAIT_3D:
		BEGIN_RING( 2 );
		RADEON_WAIT_UNTIL_IDLE();
		ADVANCE_RING();
		break;
	default:
		return DRM_ERR(EINVAL);
	}

	return 0;
}
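
/* Main command-buffer ioctl.  The client submits a byte stream of
 * drm_radeon_cmd_header_t headers, each followed by its payload; every
 * command is verified before anything reaches the ring.  As a sketch
 * (purely illustrative, not taken from any real client), a state packet
 * would be queued roughly like this:
 *
 *	drm_radeon_cmd_header_t h;
 *	h.i = 0;
 *	h.packet.cmd_type = RADEON_CMD_PACKET;
 *	h.packet.packet_id = RADEON_EMIT_PP_MISC;
 *	(append h, then the packet's register values, to cmdbuf.buf)
 */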
int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_file_t *filp_priv;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf = NULL;
	int idx;
	drm_radeon_cmd_buffer_t cmdbuf;
	drm_radeon_cmd_header_t header;
	int orig_nbox;

	LOCK_TEST_WITH_RETURN( dev, filp );

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}

	DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );

	DRM_COPY_FROM_USER_IOCTL( cmdbuf, (drm_radeon_cmd_buffer_t __user *)data,
				  sizeof(cmdbuf) );

	RING_SPACE_TEST_WITH_RETURN( dev_priv );
	VB_AGE_TEST_WITH_RETURN( dev_priv );

	if (DRM_VERIFYAREA_READ( cmdbuf.buf, cmdbuf.bufsz ))
		return DRM_ERR(EFAULT);

	if (cmdbuf.nbox &&
	    DRM_VERIFYAREA_READ(cmdbuf.boxes,
			cmdbuf.nbox * sizeof(drm_clip_rect_t)))
		return DRM_ERR(EFAULT);

	orig_nbox = cmdbuf.nbox;

	while ( cmdbuf.bufsz >= sizeof(header) ) {

		if (DRM_GET_USER_UNCHECKED( header.i, (int __user *)cmdbuf.buf )) {
			DRM_ERROR("__get_user %p\n", cmdbuf.buf);
			return DRM_ERR(EFAULT);
		}

		cmdbuf.buf += sizeof(header);
		cmdbuf.bufsz -= sizeof(header);

		switch (header.header.cmd_type) {
		case RADEON_CMD_PACKET:
			DRM_DEBUG("RADEON_CMD_PACKET\n");
			if (radeon_emit_packets( dev_priv, filp_priv, header, &cmdbuf )) {
				DRM_ERROR("radeon_emit_packets failed\n");
				return DRM_ERR(EINVAL);
			}
			break;

		case RADEON_CMD_SCALARS:
			DRM_DEBUG("RADEON_CMD_SCALARS\n");
			if (radeon_emit_scalars( dev_priv, header, &cmdbuf )) {
				DRM_ERROR("radeon_emit_scalars failed\n");
				return DRM_ERR(EINVAL);
			}
			break;

		case RADEON_CMD_VECTORS:
			DRM_DEBUG("RADEON_CMD_VECTORS\n");
			if (radeon_emit_vectors( dev_priv, header, &cmdbuf )) {
				DRM_ERROR("radeon_emit_vectors failed\n");
				return DRM_ERR(EINVAL);
			}
			break;

		case RADEON_CMD_DMA_DISCARD:
			DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
			idx = header.dma.buf_idx;
			if ( idx < 0 || idx >= dma->buf_count ) {
				DRM_ERROR( "buffer index %d (of %d max)\n",
					   idx, dma->buf_count - 1 );
				return DRM_ERR(EINVAL);
			}

			buf = dma->buflist[idx];
			if ( buf->filp != filp || buf->pending ) {
				DRM_ERROR( "bad buffer %p %p %d\n",
					   buf->filp, filp, buf->pending);
				return DRM_ERR(EINVAL);
			}

			radeon_cp_discard_buffer( dev, buf );
			break;

		case RADEON_CMD_PACKET3:
			DRM_DEBUG("RADEON_CMD_PACKET3\n");
			if (radeon_emit_packet3( dev, filp_priv, &cmdbuf )) {
				DRM_ERROR("radeon_emit_packet3 failed\n");
				return DRM_ERR(EINVAL);
			}
			break;

		case RADEON_CMD_PACKET3_CLIP:
			DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
			if (radeon_emit_packet3_cliprect( dev, filp_priv, &cmdbuf, orig_nbox )) {
				DRM_ERROR("radeon_emit_packet3_clip failed\n");
				return DRM_ERR(EINVAL);
			}
			break;

		case RADEON_CMD_SCALARS2:
			DRM_DEBUG("RADEON_CMD_SCALARS2\n");
			if (radeon_emit_scalars2( dev_priv, header, &cmdbuf )) {
				DRM_ERROR("radeon_emit_scalars2 failed\n");
				return DRM_ERR(EINVAL);
			}
			break;

		case RADEON_CMD_WAIT:
			DRM_DEBUG("RADEON_CMD_WAIT\n");
			if (radeon_emit_wait( dev, header.wait.flags )) {
				DRM_ERROR("radeon_emit_wait failed\n");
				return DRM_ERR(EINVAL);
			}
			break;

		default:
			DRM_ERROR("bad cmd_type %d at %p\n",
				  header.header.cmd_type,
				  cmdbuf.buf - sizeof(header));
			return DRM_ERR(EINVAL);
		}
	}

	DRM_DEBUG("DONE\n");
	COMMIT_RING();
	return 0;
}
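
/* Query driver state (scratch registers, GART and register apertures,
 * IRQ number) on behalf of the client.
 */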
int radeon_cp_getparam( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_getparam_t param;
	int value;

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL( param, (drm_radeon_getparam_t __user *)data,
				  sizeof(param) );

	DRM_DEBUG( "pid=%d\n", DRM_CURRENTPID );

	switch( param.param ) {
	case RADEON_PARAM_GART_BUFFER_OFFSET:
		value = dev_priv->gart_buffers_offset;
		break;
	case RADEON_PARAM_LAST_FRAME:
		dev_priv->stats.last_frame_reads++;
		value = GET_SCRATCH( 0 );
		break;
	case RADEON_PARAM_LAST_DISPATCH:
		value = GET_SCRATCH( 1 );
		break;
	case RADEON_PARAM_LAST_CLEAR:
		dev_priv->stats.last_clear_reads++;
		value = GET_SCRATCH( 2 );
		break;
	case RADEON_PARAM_IRQ_NR:
		value = dev->irq;
		break;
	case RADEON_PARAM_GART_BASE:
		value = dev_priv->gart_vm_start;
		break;
	case RADEON_PARAM_REGISTER_HANDLE:
		value = dev_priv->mmio_offset;
		break;
	case RADEON_PARAM_STATUS_HANDLE:
		value = dev_priv->ring_rptr_offset;
		break;
#if BITS_PER_LONG == 32
	/*
	 * This ioctl() doesn't work on 64-bit platforms because hw_lock is a
	 * pointer which can't fit into an int-sized variable.  According to
	 * Michel Dänzer, the ioctl() is only used on embedded platforms, so
	 * not supporting it shouldn't be a problem.  If the same functionality
	 * is needed on 64-bit platforms, a new ioctl() would have to be added,
	 * so backwards-compatibility for the embedded platforms can be
	 * maintained.  --davidm 4-Feb-2004.
	 */
	case RADEON_PARAM_SAREA_HANDLE:
		/* The lock is the first dword in the sarea. */
		value = (long)dev->lock.hw_lock;
		break;
#endif
	case RADEON_PARAM_GART_TEX_HANDLE:
		value = dev_priv->gart_textures_offset;
		break;
	default:
		return DRM_ERR(EINVAL);
	}

	if ( DRM_COPY_TO_USER( param.value, &value, sizeof(int) ) ) {
		DRM_ERROR( "copy_to_user\n" );
		return DRM_ERR(EFAULT);
	}

	return 0;
}
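
/* Currently only lets a client declare where it believes the
 * framebuffer is located; the per-file offset fixup delta is derived
 * from that value.
 */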
int radeon_cp_setparam( DRM_IOCTL_ARGS ) {
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_file_t *filp_priv;
	drm_radeon_setparam_t sp;
	struct drm_radeon_driver_file_fields *radeon_priv;

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR( EINVAL );
	}

	DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );

	DRM_COPY_FROM_USER_IOCTL( sp, ( drm_radeon_setparam_t __user * )data,
				  sizeof( sp ) );

	switch( sp.param ) {
	case RADEON_SETPARAM_FB_LOCATION:
		radeon_priv = filp_priv->driver_priv;
		radeon_priv->radeon_fb_delta = dev_priv->fb_location - sp.value;
		break;
	default:
		DRM_DEBUG( "Invalid parameter %d\n", sp.param );
		return DRM_ERR( EINVAL );
	}

	return 0;
}

/* When a client dies:
 *    - Check for and clean up flipped page state
 *    - Free any allocated GART memory.
 *
 * DRM infrastructure takes care of reclaiming dma buffers.
 */
static void radeon_driver_prerelease(drm_device_t *dev, DRMFILE filp)
{
	if ( dev->dev_private ) {
		drm_radeon_private_t *dev_priv = dev->dev_private;
		if ( dev_priv->page_flipping ) {
			radeon_do_cleanup_pageflip( dev );
		}
		radeon_mem_release( filp, dev_priv->gart_heap );
		radeon_mem_release( filp, dev_priv->fb_heap );
	}
}

static void radeon_driver_pretakedown(drm_device_t *dev)
{
	radeon_do_release(dev);
}
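
/* Allocate the per-file private record used for offset fixups.  The
 * delta defaults to the framebuffer location itself, i.e. clients are
 * assumed to pass zero-based offsets until they say otherwise via
 * setparam.
 */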
static int radeon_driver_open_helper(drm_device_t *dev, drm_file_t *filp_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_driver_file_fields *radeon_priv;

	radeon_priv = (struct drm_radeon_driver_file_fields *)DRM(alloc)(sizeof(*radeon_priv), DRM_MEM_FILES);
	if (!radeon_priv)
		return -ENOMEM;

	filp_priv->driver_priv = radeon_priv;
	if ( dev_priv )
		radeon_priv->radeon_fb_delta = dev_priv->fb_location;
	else
		radeon_priv->radeon_fb_delta = 0;
	return 0;
}

static void radeon_driver_free_filp_priv(drm_device_t *dev, drm_file_t *filp_priv)
{
	struct drm_radeon_driver_file_fields *radeon_priv = filp_priv->driver_priv;

	DRM(free)(radeon_priv, sizeof(*radeon_priv), DRM_MEM_FILES);
}
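
/* Hook the radeon driver into the DRM core: advertise driver features
 * and fill in the driver callback table.
 */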
void radeon_driver_register_fns(struct drm_device *dev)
{
	dev->driver_features =
		DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
		DRIVER_SG | DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA |
		DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL;
	dev->dev_priv_size = sizeof(drm_radeon_buf_priv_t);
	dev->fn_tbl.prerelease = radeon_driver_prerelease;
	dev->fn_tbl.pretakedown = radeon_driver_pretakedown;
	dev->fn_tbl.open_helper = radeon_driver_open_helper;
	dev->fn_tbl.free_filp_priv = radeon_driver_free_filp_priv;
	dev->fn_tbl.vblank_wait = radeon_driver_vblank_wait;
	dev->fn_tbl.irq_preinstall = radeon_driver_irq_preinstall;
	dev->fn_tbl.irq_postinstall = radeon_driver_irq_postinstall;
	dev->fn_tbl.irq_uninstall = radeon_driver_irq_uninstall;
	dev->fn_tbl.irq_handler = radeon_driver_irq_handler;
}