1 /* radeon_state.c -- State support for Radeon -*- linux-c -*-
3 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
26 * Gareth Hughes <gareth@valinux.com>
27 * Kevin E. Martin <martin@valinux.com>
33 #include "drm_sarea.h"
34 #include "radeon_drm.h"
35 #include "radeon_drv.h"
38 /* ================================================================
39 * Helper functions for client state checking and fixup
42 static __inline__ int radeon_check_and_fixup_offset( drm_radeon_private_t *dev_priv,
43 drm_file_t *filp_priv,
46 struct drm_radeon_driver_file_fields *radeon_priv;
48 if ( off >= dev_priv->fb_location &&
49 off < ( dev_priv->gart_vm_start + dev_priv->gart_size ) )
52 radeon_priv = filp_priv->driver_priv;
53 off += radeon_priv->radeon_fb_delta;
55 DRM_DEBUG( "offset fixed up to 0x%x\n", off );
57 if ( off < dev_priv->fb_location ||
58 off >= ( dev_priv->gart_vm_start + dev_priv->gart_size ) )
59 return DRM_ERR( EINVAL );
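/* Summary of radeon_check_and_fixup_offset(): an offset handed in by the
 * client is accepted as-is when it already falls inside the window from
 * the framebuffer base to the end of the GART aperture.  Otherwise the
 * per-client framebuffer delta is added and the result is re-checked;
 * anything still outside that window is rejected with EINVAL before it
 * can be emitted to the ring.
 */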
66 static __inline__ int radeon_check_and_fixup_packets( drm_radeon_private_t *dev_priv,
67 drm_file_t *filp_priv,
72 case RADEON_EMIT_PP_MISC:
73 if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
74 &data[( RADEON_RB3D_DEPTHOFFSET
75 - RADEON_PP_MISC ) / 4] ) ) {
76 DRM_ERROR( "Invalid depth buffer offset\n" );
77 return DRM_ERR( EINVAL );
81 case RADEON_EMIT_PP_CNTL:
82 if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
83 &data[( RADEON_RB3D_COLOROFFSET
84 - RADEON_PP_CNTL ) / 4] ) ) {
85 DRM_ERROR( "Invalid colour buffer offset\n" );
86 return DRM_ERR( EINVAL );
90 case R200_EMIT_PP_TXOFFSET_0:
91 case R200_EMIT_PP_TXOFFSET_1:
92 case R200_EMIT_PP_TXOFFSET_2:
93 case R200_EMIT_PP_TXOFFSET_3:
94 case R200_EMIT_PP_TXOFFSET_4:
95 case R200_EMIT_PP_TXOFFSET_5:
96 if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
98 DRM_ERROR( "Invalid R200 texture offset\n" );
99 return DRM_ERR( EINVAL );
103 case RADEON_EMIT_PP_TXFILTER_0:
104 case RADEON_EMIT_PP_TXFILTER_1:
105 case RADEON_EMIT_PP_TXFILTER_2:
106 if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
107 &data[( RADEON_PP_TXOFFSET_0
108 - RADEON_PP_TXFILTER_0 ) / 4] ) ) {
109 DRM_ERROR( "Invalid R100 texture offset\n" );
110 return DRM_ERR( EINVAL );
114 case R200_EMIT_PP_CUBIC_OFFSETS_0:
115 case R200_EMIT_PP_CUBIC_OFFSETS_1:
116 case R200_EMIT_PP_CUBIC_OFFSETS_2:
117 case R200_EMIT_PP_CUBIC_OFFSETS_3:
118 case R200_EMIT_PP_CUBIC_OFFSETS_4:
119 case R200_EMIT_PP_CUBIC_OFFSETS_5: {
121 for ( i = 0; i < 5; i++ ) {
122 if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
124 DRM_ERROR( "Invalid R200 cubic texture offset\n" );
125 return DRM_ERR( EINVAL );
131 case RADEON_EMIT_RB3D_COLORPITCH:
132 case RADEON_EMIT_RE_LINE_PATTERN:
133 case RADEON_EMIT_SE_LINE_WIDTH:
134 case RADEON_EMIT_PP_LUM_MATRIX:
135 case RADEON_EMIT_PP_ROT_MATRIX_0:
136 case RADEON_EMIT_RB3D_STENCILREFMASK:
137 case RADEON_EMIT_SE_VPORT_XSCALE:
138 case RADEON_EMIT_SE_CNTL:
139 case RADEON_EMIT_SE_CNTL_STATUS:
140 case RADEON_EMIT_RE_MISC:
141 case RADEON_EMIT_PP_BORDER_COLOR_0:
142 case RADEON_EMIT_PP_BORDER_COLOR_1:
143 case RADEON_EMIT_PP_BORDER_COLOR_2:
144 case RADEON_EMIT_SE_ZBIAS_FACTOR:
145 case RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT:
146 case RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED:
147 case R200_EMIT_PP_TXCBLEND_0:
148 case R200_EMIT_PP_TXCBLEND_1:
149 case R200_EMIT_PP_TXCBLEND_2:
150 case R200_EMIT_PP_TXCBLEND_3:
151 case R200_EMIT_PP_TXCBLEND_4:
152 case R200_EMIT_PP_TXCBLEND_5:
153 case R200_EMIT_PP_TXCBLEND_6:
154 case R200_EMIT_PP_TXCBLEND_7:
155 case R200_EMIT_TCL_LIGHT_MODEL_CTL_0:
156 case R200_EMIT_TFACTOR_0:
157 case R200_EMIT_VTX_FMT_0:
158 case R200_EMIT_VAP_CTL:
159 case R200_EMIT_MATRIX_SELECT_0:
160 case R200_EMIT_TEX_PROC_CTL_2:
161 case R200_EMIT_TCL_UCP_VERT_BLEND_CTL:
162 case R200_EMIT_PP_TXFILTER_0:
163 case R200_EMIT_PP_TXFILTER_1:
164 case R200_EMIT_PP_TXFILTER_2:
165 case R200_EMIT_PP_TXFILTER_3:
166 case R200_EMIT_PP_TXFILTER_4:
167 case R200_EMIT_PP_TXFILTER_5:
168 case R200_EMIT_VTE_CNTL:
169 case R200_EMIT_OUTPUT_VTX_COMP_SEL:
170 case R200_EMIT_PP_TAM_DEBUG3:
171 case R200_EMIT_PP_CNTL_X:
172 case R200_EMIT_RB3D_DEPTHXY_OFFSET:
173 case R200_EMIT_RE_AUX_SCISSOR_CNTL:
174 case R200_EMIT_RE_SCISSOR_TL_0:
175 case R200_EMIT_RE_SCISSOR_TL_1:
176 case R200_EMIT_RE_SCISSOR_TL_2:
177 case R200_EMIT_SE_VAP_CNTL_STATUS:
178 case R200_EMIT_SE_VTX_STATE_CNTL:
179 case R200_EMIT_RE_POINTSIZE:
180 case R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0:
181 case R200_EMIT_PP_CUBIC_FACES_0:
182 case R200_EMIT_PP_CUBIC_FACES_1:
183 case R200_EMIT_PP_CUBIC_FACES_2:
184 case R200_EMIT_PP_CUBIC_FACES_3:
185 case R200_EMIT_PP_CUBIC_FACES_4:
186 case R200_EMIT_PP_CUBIC_FACES_5:
187 case RADEON_EMIT_PP_TEX_SIZE_0:
188 case RADEON_EMIT_PP_TEX_SIZE_1:
189 case RADEON_EMIT_PP_TEX_SIZE_2:
190 case R200_EMIT_RB3D_BLENDCOLOR:
191 /* These packets don't contain memory offsets */
195 DRM_ERROR( "Unknown state packet ID %d\n", id );
196 return DRM_ERR( EINVAL );
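/* Note on radeon_check_and_fixup_packets(): only packet ids whose payload
 * carries a GPU address (the depth/colour buffer offsets and the R100/R200
 * texture and cubic-texture offsets) have the relevant dword routed through
 * radeon_check_and_fixup_offset(), and the fixup is applied in place on the
 * caller's copy of the data, so the packet that later reaches the ring
 * already holds the translated address.  Every other recognised id passes
 * through untouched; unknown ids are rejected.
 */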
202 static __inline__ int radeon_check_and_fixup_packet3( drm_radeon_private_t *dev_priv,
203 drm_file_t *filp_priv,
204 drm_radeon_cmd_buffer_t *cmdbuf,
205 unsigned int *cmdsz ) {
206 u32 *cmd = (u32 *) cmdbuf->buf;
208 *cmdsz = 2 + ( ( cmd[0] & RADEON_CP_PACKET_COUNT_MASK ) >> 16 );
210 if ( ( cmd[0] & 0xc0000000 ) != RADEON_CP_PACKET3 ) {
211 DRM_ERROR( "Not a type 3 packet\n" );
212 return DRM_ERR( EINVAL );
215 if ( 4 * *cmdsz > cmdbuf->bufsz ) {
216 DRM_ERROR( "Packet size larger than size of data provided\n" );
217 return DRM_ERR( EINVAL );
220 /* Check client state and fix it up if necessary */
221 if ( cmd[0] & 0x8000 ) { /* MSB of opcode: next DWORD GUI_CNTL */
224 if ( cmd[1] & ( RADEON_GMC_SRC_PITCH_OFFSET_CNTL
225 | RADEON_GMC_DST_PITCH_OFFSET_CNTL ) ) {
226 offset = cmd[2] << 10;
227 if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &offset ) ) {
228 DRM_ERROR( "Invalid first packet offset\n" );
229 return DRM_ERR( EINVAL );
231 cmd[2] = ( cmd[2] & 0xffc00000 ) | offset >> 10;
234 if ( ( cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL ) &&
235 ( cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL ) ) {
236 offset = cmd[3] << 10;
237 if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &offset ) ) {
238 DRM_ERROR( "Invalid second packet offset\n" );
239 return DRM_ERR( EINVAL );
241 cmd[3] = ( cmd[3] & 0xffc00000 ) | offset >> 10;
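/* Layout notes for the checks above: a type-3 header keeps the packet type
 * in bits 31:30 and the count in bits 29:16, and the CP convention is
 * count = data dwords - 1, so the full packet is count + 2 dwords including
 * the header (hence *cmdsz).  For GUI packets the SRC/DST pitch-offset
 * dwords keep the pitch in the top 10 bits and the surface offset in 1 KiB
 * units in the low 22 bits, which is why the offset is shifted left by 10
 * for checking and shifted back before being merged in.
 */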
249 /* ================================================================
250 * CP hardware state programming functions
253 static __inline__ void radeon_emit_clip_rect( drm_radeon_private_t *dev_priv,
254 drm_clip_rect_t *box )
258 DRM_DEBUG( " box: x1=%d y1=%d x2=%d y2=%d\n",
259 box->x1, box->y1, box->x2, box->y2 );
262 OUT_RING( CP_PACKET0( RADEON_RE_TOP_LEFT, 0 ) );
263 OUT_RING( (box->y1 << 16) | box->x1 );
264 OUT_RING( CP_PACKET0( RADEON_RE_WIDTH_HEIGHT, 0 ) );
265 OUT_RING( ((box->y2 - 1) << 16) | (box->x2 - 1) );
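/* RE_TOP_LEFT takes the inclusive top-left corner packed as (y << 16) | x,
 * and the second write takes the inclusive bottom-right corner in the same
 * packing; the DRM clip rect uses exclusive x2/y2, hence the "- 1" on both
 * coordinates.
 */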
271 static int radeon_emit_state( drm_radeon_private_t *dev_priv,
272 drm_file_t *filp_priv,
273 drm_radeon_context_regs_t *ctx,
274 drm_radeon_texture_regs_t *tex,
278 DRM_DEBUG( "dirty=0x%08x\n", dirty );
280 if ( dirty & RADEON_UPLOAD_CONTEXT ) {
281 if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
282 &ctx->rb3d_depthoffset ) ) {
283 DRM_ERROR( "Invalid depth buffer offset\n" );
284 return DRM_ERR( EINVAL );
287 if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
288 &ctx->rb3d_coloroffset ) ) {
289 			DRM_ERROR( "Invalid colour buffer offset\n" );
290 return DRM_ERR( EINVAL );
294 OUT_RING( CP_PACKET0( RADEON_PP_MISC, 6 ) );
295 OUT_RING( ctx->pp_misc );
296 OUT_RING( ctx->pp_fog_color );
297 OUT_RING( ctx->re_solid_color );
298 OUT_RING( ctx->rb3d_blendcntl );
299 OUT_RING( ctx->rb3d_depthoffset );
300 OUT_RING( ctx->rb3d_depthpitch );
301 OUT_RING( ctx->rb3d_zstencilcntl );
302 OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 2 ) );
303 OUT_RING( ctx->pp_cntl );
304 OUT_RING( ctx->rb3d_cntl );
305 OUT_RING( ctx->rb3d_coloroffset );
306 OUT_RING( CP_PACKET0( RADEON_RB3D_COLORPITCH, 0 ) );
307 OUT_RING( ctx->rb3d_colorpitch );
311 if ( dirty & RADEON_UPLOAD_VERTFMT ) {
313 OUT_RING( CP_PACKET0( RADEON_SE_COORD_FMT, 0 ) );
314 OUT_RING( ctx->se_coord_fmt );
318 if ( dirty & RADEON_UPLOAD_LINE ) {
320 OUT_RING( CP_PACKET0( RADEON_RE_LINE_PATTERN, 1 ) );
321 OUT_RING( ctx->re_line_pattern );
322 OUT_RING( ctx->re_line_state );
323 OUT_RING( CP_PACKET0( RADEON_SE_LINE_WIDTH, 0 ) );
324 OUT_RING( ctx->se_line_width );
328 if ( dirty & RADEON_UPLOAD_BUMPMAP ) {
330 OUT_RING( CP_PACKET0( RADEON_PP_LUM_MATRIX, 0 ) );
331 OUT_RING( ctx->pp_lum_matrix );
332 OUT_RING( CP_PACKET0( RADEON_PP_ROT_MATRIX_0, 1 ) );
333 OUT_RING( ctx->pp_rot_matrix_0 );
334 OUT_RING( ctx->pp_rot_matrix_1 );
338 if ( dirty & RADEON_UPLOAD_MASKS ) {
340 OUT_RING( CP_PACKET0( RADEON_RB3D_STENCILREFMASK, 2 ) );
341 OUT_RING( ctx->rb3d_stencilrefmask );
342 OUT_RING( ctx->rb3d_ropcntl );
343 OUT_RING( ctx->rb3d_planemask );
347 if ( dirty & RADEON_UPLOAD_VIEWPORT ) {
349 OUT_RING( CP_PACKET0( RADEON_SE_VPORT_XSCALE, 5 ) );
350 OUT_RING( ctx->se_vport_xscale );
351 OUT_RING( ctx->se_vport_xoffset );
352 OUT_RING( ctx->se_vport_yscale );
353 OUT_RING( ctx->se_vport_yoffset );
354 OUT_RING( ctx->se_vport_zscale );
355 OUT_RING( ctx->se_vport_zoffset );
359 if ( dirty & RADEON_UPLOAD_SETUP ) {
361 OUT_RING( CP_PACKET0( RADEON_SE_CNTL, 0 ) );
362 OUT_RING( ctx->se_cntl );
363 OUT_RING( CP_PACKET0( RADEON_SE_CNTL_STATUS, 0 ) );
364 OUT_RING( ctx->se_cntl_status );
368 if ( dirty & RADEON_UPLOAD_MISC ) {
370 OUT_RING( CP_PACKET0( RADEON_RE_MISC, 0 ) );
371 OUT_RING( ctx->re_misc );
375 if ( dirty & RADEON_UPLOAD_TEX0 ) {
376 if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
377 &tex[0].pp_txoffset ) ) {
378 DRM_ERROR( "Invalid texture offset for unit 0\n" );
379 return DRM_ERR( EINVAL );
383 OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_0, 5 ) );
384 OUT_RING( tex[0].pp_txfilter );
385 OUT_RING( tex[0].pp_txformat );
386 OUT_RING( tex[0].pp_txoffset );
387 OUT_RING( tex[0].pp_txcblend );
388 OUT_RING( tex[0].pp_txablend );
389 OUT_RING( tex[0].pp_tfactor );
390 OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_0, 0 ) );
391 OUT_RING( tex[0].pp_border_color );
395 if ( dirty & RADEON_UPLOAD_TEX1 ) {
396 if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
397 &tex[1].pp_txoffset ) ) {
398 DRM_ERROR( "Invalid texture offset for unit 1\n" );
399 return DRM_ERR( EINVAL );
403 OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_1, 5 ) );
404 OUT_RING( tex[1].pp_txfilter );
405 OUT_RING( tex[1].pp_txformat );
406 OUT_RING( tex[1].pp_txoffset );
407 OUT_RING( tex[1].pp_txcblend );
408 OUT_RING( tex[1].pp_txablend );
409 OUT_RING( tex[1].pp_tfactor );
410 OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_1, 0 ) );
411 OUT_RING( tex[1].pp_border_color );
415 if ( dirty & RADEON_UPLOAD_TEX2 ) {
416 if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
417 &tex[2].pp_txoffset ) ) {
418 DRM_ERROR( "Invalid texture offset for unit 2\n" );
419 return DRM_ERR( EINVAL );
423 OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_2, 5 ) );
424 OUT_RING( tex[2].pp_txfilter );
425 OUT_RING( tex[2].pp_txformat );
426 OUT_RING( tex[2].pp_txoffset );
427 OUT_RING( tex[2].pp_txcblend );
428 OUT_RING( tex[2].pp_txablend );
429 OUT_RING( tex[2].pp_tfactor );
430 OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_2, 0 ) );
431 OUT_RING( tex[2].pp_border_color );
440 static int radeon_emit_state2( drm_radeon_private_t *dev_priv,
441 drm_file_t *filp_priv,
442 drm_radeon_state_t *state )
446 if (state->dirty & RADEON_UPLOAD_ZBIAS) {
448 OUT_RING( CP_PACKET0( RADEON_SE_ZBIAS_FACTOR, 1 ) );
449 OUT_RING( state->context2.se_zbias_factor );
450 OUT_RING( state->context2.se_zbias_constant );
454 return radeon_emit_state( dev_priv, filp_priv, &state->context,
455 state->tex, state->dirty );
458 /* New (1.3) state mechanism. 3 commands (packet, scalar, vector) in
459 * 1.3 cmdbuffers allow all previous state to be updated as well as
460 * the tcl scalar and vector areas.
466 } packet[RADEON_MAX_STATE_PACKETS] = {
467 	{ RADEON_PP_MISC, 7, "RADEON_PP_MISC" },
468 	{ RADEON_PP_CNTL, 3, "RADEON_PP_CNTL" },
469 	{ RADEON_RB3D_COLORPITCH, 1, "RADEON_RB3D_COLORPITCH" },
470 	{ RADEON_RE_LINE_PATTERN, 2, "RADEON_RE_LINE_PATTERN" },
471 	{ RADEON_SE_LINE_WIDTH, 1, "RADEON_SE_LINE_WIDTH" },
472 	{ RADEON_PP_LUM_MATRIX, 1, "RADEON_PP_LUM_MATRIX" },
473 	{ RADEON_PP_ROT_MATRIX_0, 2, "RADEON_PP_ROT_MATRIX_0" },
474 	{ RADEON_RB3D_STENCILREFMASK, 3, "RADEON_RB3D_STENCILREFMASK" },
475 	{ RADEON_SE_VPORT_XSCALE, 6, "RADEON_SE_VPORT_XSCALE" },
476 	{ RADEON_SE_CNTL, 2, "RADEON_SE_CNTL" },
477 	{ RADEON_SE_CNTL_STATUS, 1, "RADEON_SE_CNTL_STATUS" },
478 	{ RADEON_RE_MISC, 1, "RADEON_RE_MISC" },
479 	{ RADEON_PP_TXFILTER_0, 6, "RADEON_PP_TXFILTER_0" },
480 	{ RADEON_PP_BORDER_COLOR_0, 1, "RADEON_PP_BORDER_COLOR_0" },
481 	{ RADEON_PP_TXFILTER_1, 6, "RADEON_PP_TXFILTER_1" },
482 	{ RADEON_PP_BORDER_COLOR_1, 1, "RADEON_PP_BORDER_COLOR_1" },
483 	{ RADEON_PP_TXFILTER_2, 6, "RADEON_PP_TXFILTER_2" },
484 	{ RADEON_PP_BORDER_COLOR_2, 1, "RADEON_PP_BORDER_COLOR_2" },
485 	{ RADEON_SE_ZBIAS_FACTOR, 2, "RADEON_SE_ZBIAS_FACTOR" },
486 	{ RADEON_SE_TCL_OUTPUT_VTX_FMT, 11, "RADEON_SE_TCL_OUTPUT_VTX_FMT" },
487 	{ RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 17, "RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED" },
488 { R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0" },
489 { R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1" },
490 { R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2" },
491 { R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3" },
492 { R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4" },
493 { R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5" },
494 { R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6" },
495 { R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7" },
496 { R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0" },
497 { R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0" },
498 { R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0" },
499 { R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL" },
500 { R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0" },
501 { R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2" },
502 { R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL" },
503 { R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0" },
504 { R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1" },
505 { R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2" },
506 { R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3" },
507 { R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4" },
508 { R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5" },
509 { R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0" },
510 { R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1" },
511 { R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2" },
512 { R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3" },
513 { R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4" },
514 { R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5" },
515 { R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL" },
516 { R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1, "R200_SE_TCL_OUTPUT_VTX_COMP_SEL" },
517 { R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3" },
518 { R200_PP_CNTL_X, 1, "R200_PP_CNTL_X" },
519 { R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET" },
520 { R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL" },
521 { R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0" },
522 { R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1" },
523 { R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2" },
524 { R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS" },
525 { R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL" },
526 { R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE" },
527 { R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4, "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0" },
528 { R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0" }, /* 61 */
529 { R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0" }, /* 62 */
530 { R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1" },
531 { R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1" },
532 { R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2" },
533 { R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2" },
534 { R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3" },
535 { R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3" },
536 { R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4" },
537 { R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4" },
538 { R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5" },
539 { R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5" },
540 { RADEON_PP_TEX_SIZE_0, 2, "RADEON_PP_TEX_SIZE_0" },
541 { RADEON_PP_TEX_SIZE_1, 2, "RADEON_PP_TEX_SIZE_1" },
542 { RADEON_PP_TEX_SIZE_2, 2, "RADEON_PP_TEX_SIZE_2" },
543 { R200_RB3D_BLENDCOLOR, 3, "R200_RB3D_BLENDCOLOR" },
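/* Each entry above maps a RADEON_EMIT_* / R200_EMIT_* packet id to the
 * first register it touches and the number of dwords that follow the
 * command header.  radeon_emit_packets() below turns an entry directly
 * into a type-0 register write, roughly:
 *
 *	OUT_RING( CP_PACKET0( packet[id].start, packet[id].len - 1 ) );
 *	OUT_RING_TABLE( data, packet[id].len );
 *
 * so this table is the single place that fixes how large each state
 * packet may be.
 */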
548 /* ================================================================
549 * Performance monitoring functions
552 static void radeon_clear_box( drm_radeon_private_t *dev_priv,
553 int x, int y, int w, int h,
554 int r, int g, int b )
559 x += dev_priv->sarea_priv->boxes[0].x1;
560 y += dev_priv->sarea_priv->boxes[0].y1;
562 switch ( dev_priv->color_fmt ) {
563 case RADEON_COLOR_FORMAT_RGB565:
564 color = (((r & 0xf8) << 8) |
568 case RADEON_COLOR_FORMAT_ARGB8888:
570 color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
575 RADEON_WAIT_UNTIL_3D_IDLE();
576 OUT_RING( CP_PACKET0( RADEON_DP_WRITE_MASK, 0 ) );
577 OUT_RING( 0xffffffff );
582 OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) );
583 OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL |
584 RADEON_GMC_BRUSH_SOLID_COLOR |
585 (dev_priv->color_fmt << 8) |
586 RADEON_GMC_SRC_DATATYPE_COLOR |
588 RADEON_GMC_CLR_CMP_CNTL_DIS );
590 if ( dev_priv->page_flipping && dev_priv->current_page == 1 ) {
591 OUT_RING( dev_priv->front_pitch_offset );
593 OUT_RING( dev_priv->back_pitch_offset );
598 OUT_RING( (x << 16) | y );
599 OUT_RING( (w << 16) | h );
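/* The debug box is drawn with a solid-brush RADEON_CNTL_PAINT_MULTI blit:
 * for RGB565 the 8-bit channels are truncated to 5:6:5 before packing,
 * while ARGB8888 keeps full precision with alpha forced to 0xff.  The x/y
 * adjustment above anchors the box to the first cliprect so it tracks the
 * window rather than the screen origin.
 */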
604 static void radeon_cp_performance_boxes( drm_radeon_private_t *dev_priv )
606 /* Collapse various things into a wait flag -- trying to
607 	 * guess if userspace slept -- better just to have them tell us.
609 if (dev_priv->stats.last_frame_reads > 1 ||
610 dev_priv->stats.last_clear_reads > dev_priv->stats.clears) {
611 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
614 if (dev_priv->stats.freelist_loops) {
615 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
618 /* Purple box for page flipping
620 if ( dev_priv->stats.boxes & RADEON_BOX_FLIP )
621 radeon_clear_box( dev_priv, 4, 4, 8, 8, 255, 0, 255 );
623 /* Red box if we have to wait for idle at any point
625 if ( dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE )
626 radeon_clear_box( dev_priv, 16, 4, 8, 8, 255, 0, 0 );
628 /* Blue box: lost context?
631 /* Yellow box for texture swaps
633 if ( dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD )
634 radeon_clear_box( dev_priv, 40, 4, 8, 8, 255, 255, 0 );
636 /* Green box if hardware never idles (as far as we can tell)
638 if ( !(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE) )
639 radeon_clear_box( dev_priv, 64, 4, 8, 8, 0, 255, 0 );
642 /* Draw bars indicating number of buffers allocated
643 * (not a great measure, easily confused)
645 if (dev_priv->stats.requested_bufs) {
646 if (dev_priv->stats.requested_bufs > 100)
647 dev_priv->stats.requested_bufs = 100;
649 radeon_clear_box( dev_priv, 4, 16,
650 dev_priv->stats.requested_bufs, 4,
654 memset( &dev_priv->stats, 0, sizeof(dev_priv->stats) );
657 /* ================================================================
658 * CP command dispatch functions
661 static void radeon_cp_dispatch_clear( drm_device_t *dev,
662 drm_radeon_clear_t *clear,
663 drm_radeon_clear_rect_t *depth_boxes )
665 drm_radeon_private_t *dev_priv = dev->dev_private;
666 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
667 drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
668 int nbox = sarea_priv->nbox;
669 drm_clip_rect_t *pbox = sarea_priv->boxes;
670 unsigned int flags = clear->flags;
671 u32 rb3d_cntl = 0, rb3d_stencilrefmask= 0;
674 DRM_DEBUG( "flags = 0x%x\n", flags );
676 dev_priv->stats.clears++;
678 if ( dev_priv->page_flipping && dev_priv->current_page == 1 ) {
679 unsigned int tmp = flags;
681 flags &= ~(RADEON_FRONT | RADEON_BACK);
682 if ( tmp & RADEON_FRONT ) flags |= RADEON_BACK;
683 if ( tmp & RADEON_BACK ) flags |= RADEON_FRONT;
686 if ( flags & (RADEON_FRONT | RADEON_BACK) ) {
690 /* Ensure the 3D stream is idle before doing a
691 * 2D fill to clear the front or back buffer.
693 RADEON_WAIT_UNTIL_3D_IDLE();
695 OUT_RING( CP_PACKET0( RADEON_DP_WRITE_MASK, 0 ) );
696 OUT_RING( clear->color_mask );
700 /* Make sure we restore the 3D state next time.
702 dev_priv->sarea_priv->ctx_owner = 0;
704 for ( i = 0 ; i < nbox ; i++ ) {
707 int w = pbox[i].x2 - x;
708 int h = pbox[i].y2 - y;
710 DRM_DEBUG( "dispatch clear %d,%d-%d,%d flags 0x%x\n",
713 if ( flags & RADEON_FRONT ) {
716 OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) );
717 OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL |
718 RADEON_GMC_BRUSH_SOLID_COLOR |
719 (dev_priv->color_fmt << 8) |
720 RADEON_GMC_SRC_DATATYPE_COLOR |
722 RADEON_GMC_CLR_CMP_CNTL_DIS );
724 OUT_RING( dev_priv->front_pitch_offset );
725 OUT_RING( clear->clear_color );
727 OUT_RING( (x << 16) | y );
728 OUT_RING( (w << 16) | h );
733 if ( flags & RADEON_BACK ) {
736 OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) );
737 OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL |
738 RADEON_GMC_BRUSH_SOLID_COLOR |
739 (dev_priv->color_fmt << 8) |
740 RADEON_GMC_SRC_DATATYPE_COLOR |
742 RADEON_GMC_CLR_CMP_CNTL_DIS );
744 OUT_RING( dev_priv->back_pitch_offset );
745 OUT_RING( clear->clear_color );
747 OUT_RING( (x << 16) | y );
748 OUT_RING( (w << 16) | h );
755 /* We have to clear the depth and/or stencil buffers by
756 * rendering a quad into just those buffers. Thus, we have to
757 * make sure the 3D engine is configured correctly.
759 if ( dev_priv->is_r200 &&
760 (flags & (RADEON_DEPTH | RADEON_STENCIL)) ) {
765 int tempRB3D_ZSTENCILCNTL;
766 int tempRB3D_STENCILREFMASK;
767 int tempRB3D_PLANEMASK;
770 int tempSE_VTX_FMT_0;
771 int tempSE_VTX_FMT_1;
773 int tempRE_AUX_SCISSOR_CNTL;
778 tempRB3D_CNTL = depth_clear->rb3d_cntl;
779 tempRB3D_CNTL &= ~(1<<15); /* unset radeon magic flag */
781 tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
782 tempRB3D_STENCILREFMASK = 0x0;
784 tempSE_CNTL = depth_clear->se_cntl;
790 tempSE_VAP_CNTL = (/* SE_VAP_CNTL__FORCE_W_TO_ONE_MASK | */
791 (0x9 << SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT));
793 tempRB3D_PLANEMASK = 0x0;
795 tempRE_AUX_SCISSOR_CNTL = 0x0;
798 SE_VTE_CNTL__VTX_XY_FMT_MASK |
799 SE_VTE_CNTL__VTX_Z_FMT_MASK;
801 /* Vertex format (X, Y, Z, W)*/
803 SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK |
804 SE_VTX_FMT_0__VTX_W0_PRESENT_MASK;
805 tempSE_VTX_FMT_1 = 0x0;
809 * Depth buffer specific enables
811 if (flags & RADEON_DEPTH) {
812 /* Enable depth buffer */
813 tempRB3D_CNTL |= RADEON_Z_ENABLE;
815 /* Disable depth buffer */
816 tempRB3D_CNTL &= ~RADEON_Z_ENABLE;
820 * Stencil buffer specific enables
822 if ( flags & RADEON_STENCIL ) {
823 tempRB3D_CNTL |= RADEON_STENCIL_ENABLE;
824 tempRB3D_STENCILREFMASK = clear->depth_mask;
826 tempRB3D_CNTL &= ~RADEON_STENCIL_ENABLE;
827 tempRB3D_STENCILREFMASK = 0x00000000;
831 RADEON_WAIT_UNTIL_2D_IDLE();
833 OUT_RING_REG( RADEON_PP_CNTL, tempPP_CNTL );
834 OUT_RING_REG( R200_RE_CNTL, tempRE_CNTL );
835 OUT_RING_REG( RADEON_RB3D_CNTL, tempRB3D_CNTL );
836 OUT_RING_REG( RADEON_RB3D_ZSTENCILCNTL,
837 tempRB3D_ZSTENCILCNTL );
838 OUT_RING_REG( RADEON_RB3D_STENCILREFMASK,
839 tempRB3D_STENCILREFMASK );
840 OUT_RING_REG( RADEON_RB3D_PLANEMASK, tempRB3D_PLANEMASK );
841 OUT_RING_REG( RADEON_SE_CNTL, tempSE_CNTL );
842 OUT_RING_REG( R200_SE_VTE_CNTL, tempSE_VTE_CNTL );
843 OUT_RING_REG( R200_SE_VTX_FMT_0, tempSE_VTX_FMT_0 );
844 OUT_RING_REG( R200_SE_VTX_FMT_1, tempSE_VTX_FMT_1 );
845 OUT_RING_REG( R200_SE_VAP_CNTL, tempSE_VAP_CNTL );
846 OUT_RING_REG( R200_RE_AUX_SCISSOR_CNTL,
847 tempRE_AUX_SCISSOR_CNTL );
850 /* Make sure we restore the 3D state next time.
852 dev_priv->sarea_priv->ctx_owner = 0;
854 for ( i = 0 ; i < nbox ; i++ ) {
856 			/* Funny that this should be required -- sets top-left? */
859 radeon_emit_clip_rect( dev_priv,
860 &sarea_priv->boxes[i] );
863 OUT_RING( CP_PACKET3( R200_3D_DRAW_IMMD_2, 12 ) );
864 OUT_RING( (RADEON_PRIM_TYPE_RECT_LIST |
865 RADEON_PRIM_WALK_RING |
866 (3 << RADEON_NUM_VERTICES_SHIFT)) );
867 OUT_RING( depth_boxes[i].ui[CLEAR_X1] );
868 OUT_RING( depth_boxes[i].ui[CLEAR_Y1] );
869 OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
870 OUT_RING( 0x3f800000 );
871 OUT_RING( depth_boxes[i].ui[CLEAR_X1] );
872 OUT_RING( depth_boxes[i].ui[CLEAR_Y2] );
873 OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
874 OUT_RING( 0x3f800000 );
875 OUT_RING( depth_boxes[i].ui[CLEAR_X2] );
876 OUT_RING( depth_boxes[i].ui[CLEAR_Y2] );
877 OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
878 OUT_RING( 0x3f800000 );
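/* The clear itself is a three-vertex RECT_LIST drawn from the caller's
 * depth_boxes: (x1,y1), (x1,y2) and (x2,y2), each carrying the clear depth
 * and w = 1.0 (0x3f800000).  With the plane mask zeroed above, only the
 * depth and/or stencil planes selected in RB3D_CNTL are actually written.
 */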
882 else if ( (flags & (RADEON_DEPTH | RADEON_STENCIL)) ) {
884 rb3d_cntl = depth_clear->rb3d_cntl;
886 if ( flags & RADEON_DEPTH ) {
887 rb3d_cntl |= RADEON_Z_ENABLE;
889 rb3d_cntl &= ~RADEON_Z_ENABLE;
892 if ( flags & RADEON_STENCIL ) {
893 rb3d_cntl |= RADEON_STENCIL_ENABLE;
894 rb3d_stencilrefmask = clear->depth_mask; /* misnamed field */
896 rb3d_cntl &= ~RADEON_STENCIL_ENABLE;
897 rb3d_stencilrefmask = 0x00000000;
901 RADEON_WAIT_UNTIL_2D_IDLE();
903 OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 1 ) );
904 OUT_RING( 0x00000000 );
905 OUT_RING( rb3d_cntl );
907 OUT_RING_REG( RADEON_RB3D_ZSTENCILCNTL,
908 depth_clear->rb3d_zstencilcntl );
909 OUT_RING_REG( RADEON_RB3D_STENCILREFMASK,
910 rb3d_stencilrefmask );
911 OUT_RING_REG( RADEON_RB3D_PLANEMASK,
913 OUT_RING_REG( RADEON_SE_CNTL,
914 depth_clear->se_cntl );
917 /* Make sure we restore the 3D state next time.
919 dev_priv->sarea_priv->ctx_owner = 0;
921 for ( i = 0 ; i < nbox ; i++ ) {
923 			/* Funny that this should be required -- sets top-left? */
926 radeon_emit_clip_rect( dev_priv,
927 &sarea_priv->boxes[i] );
931 OUT_RING( CP_PACKET3( RADEON_3D_DRAW_IMMD, 13 ) );
932 OUT_RING( RADEON_VTX_Z_PRESENT |
933 RADEON_VTX_PKCOLOR_PRESENT);
934 OUT_RING( (RADEON_PRIM_TYPE_RECT_LIST |
935 RADEON_PRIM_WALK_RING |
937 RADEON_VTX_FMT_RADEON_MODE |
938 (3 << RADEON_NUM_VERTICES_SHIFT)) );
941 OUT_RING( depth_boxes[i].ui[CLEAR_X1] );
942 OUT_RING( depth_boxes[i].ui[CLEAR_Y1] );
943 OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
946 OUT_RING( depth_boxes[i].ui[CLEAR_X1] );
947 OUT_RING( depth_boxes[i].ui[CLEAR_Y2] );
948 OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
951 OUT_RING( depth_boxes[i].ui[CLEAR_X2] );
952 OUT_RING( depth_boxes[i].ui[CLEAR_Y2] );
953 OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
960 /* Increment the clear counter. The client-side 3D driver must
961 * wait on this value before performing the clear ioctl. We
962 * need this because the card's so damned fast...
964 dev_priv->sarea_priv->last_clear++;
968 RADEON_CLEAR_AGE( dev_priv->sarea_priv->last_clear );
969 RADEON_WAIT_UNTIL_IDLE();
974 static void radeon_cp_dispatch_swap( drm_device_t *dev )
976 drm_radeon_private_t *dev_priv = dev->dev_private;
977 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
978 int nbox = sarea_priv->nbox;
979 drm_clip_rect_t *pbox = sarea_priv->boxes;
984 /* Do some trivial performance monitoring...
986 if (dev_priv->do_boxes)
987 radeon_cp_performance_boxes( dev_priv );
990 /* Wait for the 3D stream to idle before dispatching the bitblt.
991 * This will prevent data corruption between the two streams.
995 RADEON_WAIT_UNTIL_3D_IDLE();
999 for ( i = 0 ; i < nbox ; i++ ) {
1002 int w = pbox[i].x2 - x;
1003 int h = pbox[i].y2 - y;
1005 DRM_DEBUG( "dispatch swap %d,%d-%d,%d\n",
1010 OUT_RING( CP_PACKET3( RADEON_CNTL_BITBLT_MULTI, 5 ) );
1011 OUT_RING( RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
1012 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
1013 RADEON_GMC_BRUSH_NONE |
1014 (dev_priv->color_fmt << 8) |
1015 RADEON_GMC_SRC_DATATYPE_COLOR |
1017 RADEON_DP_SRC_SOURCE_MEMORY |
1018 RADEON_GMC_CLR_CMP_CNTL_DIS |
1019 RADEON_GMC_WR_MSK_DIS );
1021 /* Make this work even if front & back are flipped:
1023 if (dev_priv->current_page == 0) {
1024 OUT_RING( dev_priv->back_pitch_offset );
1025 OUT_RING( dev_priv->front_pitch_offset );
1028 OUT_RING( dev_priv->front_pitch_offset );
1029 OUT_RING( dev_priv->back_pitch_offset );
1032 OUT_RING( (x << 16) | y );
1033 OUT_RING( (x << 16) | y );
1034 OUT_RING( (w << 16) | h );
1039 /* Increment the frame counter. The client-side 3D driver must
1040 * throttle the framerate by waiting for this value before
1041 * performing the swapbuffer ioctl.
1043 dev_priv->sarea_priv->last_frame++;
1047 RADEON_FRAME_AGE( dev_priv->sarea_priv->last_frame );
1048 RADEON_WAIT_UNTIL_2D_IDLE();
1053 static void radeon_cp_dispatch_flip( drm_device_t *dev )
1055 drm_radeon_private_t *dev_priv = dev->dev_private;
1056 drm_sarea_t *sarea = (drm_sarea_t *)dev_priv->sarea->handle;
1057 int offset = (dev_priv->current_page == 1)
1058 ? dev_priv->front_offset : dev_priv->back_offset;
1060 DRM_DEBUG( "%s: page=%d pfCurrentPage=%d\n",
1062 dev_priv->current_page,
1063 dev_priv->sarea_priv->pfCurrentPage);
1065 /* Do some trivial performance monitoring...
1067 if (dev_priv->do_boxes) {
1068 dev_priv->stats.boxes |= RADEON_BOX_FLIP;
1069 radeon_cp_performance_boxes( dev_priv );
1072 /* Update the frame offsets for both CRTCs
1076 RADEON_WAIT_UNTIL_3D_IDLE();
1077 OUT_RING_REG( RADEON_CRTC_OFFSET, ( ( sarea->frame.y * dev_priv->front_pitch
1079 * ( dev_priv->color_fmt - 2 ) ) & ~7 )
1081 OUT_RING_REG( RADEON_CRTC2_OFFSET, dev_priv->sarea_priv->crtc2_base
1086 /* Increment the frame counter. The client-side 3D driver must
1087 * throttle the framerate by waiting for this value before
1088 * performing the swapbuffer ioctl.
1090 dev_priv->sarea_priv->last_frame++;
1091 dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
1092 1 - dev_priv->current_page;
1096 RADEON_FRAME_AGE( dev_priv->sarea_priv->last_frame );
1101 static int bad_prim_vertex_nr( int primitive, int nr )
1103 switch (primitive & RADEON_PRIM_TYPE_MASK) {
1104 case RADEON_PRIM_TYPE_NONE:
1105 case RADEON_PRIM_TYPE_POINT:
1107 case RADEON_PRIM_TYPE_LINE:
1108 return (nr & 1) || nr == 0;
1109 case RADEON_PRIM_TYPE_LINE_STRIP:
1111 case RADEON_PRIM_TYPE_TRI_LIST:
1112 case RADEON_PRIM_TYPE_3VRT_POINT_LIST:
1113 case RADEON_PRIM_TYPE_3VRT_LINE_LIST:
1114 case RADEON_PRIM_TYPE_RECT_LIST:
1115 return nr % 3 || nr == 0;
1116 case RADEON_PRIM_TYPE_TRI_FAN:
1117 case RADEON_PRIM_TYPE_TRI_STRIP:
1128 unsigned int finish;
1130 unsigned int numverts;
1131 unsigned int offset;
1132 unsigned int vc_format;
1133 } drm_radeon_tcl_prim_t;
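/* bad_prim_vertex_nr() rejects counts that cannot form whole primitives:
 * lines need a non-zero even count, and the list-style primitives (tri
 * list, rect list, 3-vertex point/line lists) need a non-zero multiple of
 * three.  A drm_radeon_tcl_prim_t describes one run of primitives: the
 * start/finish byte range within a DMA buffer, the vertex count, the
 * buffer offset, the hardware primitive type and the vertex format.
 */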
1135 static void radeon_cp_dispatch_vertex( drm_device_t *dev,
1137 drm_radeon_tcl_prim_t *prim )
1140 drm_radeon_private_t *dev_priv = dev->dev_private;
1141 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
1142 int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start;
1143 int numverts = (int)prim->numverts;
1144 int nbox = sarea_priv->nbox;
1148 DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n",
1155 if (bad_prim_vertex_nr( prim->prim, prim->numverts )) {
1156 DRM_ERROR( "bad prim %x numverts %d\n",
1157 prim->prim, prim->numverts );
1162 /* Emit the next cliprect */
1164 radeon_emit_clip_rect( dev_priv,
1165 &sarea_priv->boxes[i] );
1168 /* Emit the vertex buffer rendering commands */
1171 OUT_RING( CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, 3 ) );
1173 OUT_RING( numverts );
1174 OUT_RING( prim->vc_format );
1175 OUT_RING( prim->prim | RADEON_PRIM_WALK_LIST |
1176 RADEON_COLOR_ORDER_RGBA |
1177 RADEON_VTX_FMT_RADEON_MODE |
1178 (numverts << RADEON_NUM_VERTICES_SHIFT) );
1183 } while ( i < nbox );
1188 static void radeon_cp_discard_buffer( drm_device_t *dev, drm_buf_t *buf )
1190 drm_radeon_private_t *dev_priv = dev->dev_private;
1191 drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
1194 buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;
1196 /* Emit the vertex buffer age */
1198 RADEON_DISPATCH_AGE( buf_priv->age );
1205 static void radeon_cp_dispatch_indirect( drm_device_t *dev,
1207 int start, int end )
1209 drm_radeon_private_t *dev_priv = dev->dev_private;
1211 DRM_DEBUG( "indirect: buf=%d s=0x%x e=0x%x\n",
1212 buf->idx, start, end );
1214 if ( start != end ) {
1215 int offset = (dev_priv->gart_buffers_offset
1216 + buf->offset + start);
1217 int dwords = (end - start + 3) / sizeof(u32);
1219 /* Indirect buffer data must be an even number of
1220 * dwords, so if we've been given an odd number we must
1221 * pad the data with a Type-2 CP packet.
1225 ((char *)dev->agp_buffer_map->handle
1226 + buf->offset + start);
1227 data[dwords++] = RADEON_CP_PACKET2;
1230 /* Fire off the indirect buffer */
1233 OUT_RING( CP_PACKET0( RADEON_CP_IB_BASE, 1 ) );
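/* RADEON_CP_PACKET2 is a type-2 no-op header, so appending one pads an
 * odd-length buffer up to the even dword count the CP requires without
 * changing what the buffer does.  CP_PACKET0( RADEON_CP_IB_BASE, 1 ) then
 * opens a two-register write starting at the IB base register, which is
 * how the buffer's GART address and its length in dwords reach the CP.
 */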
1242 static void radeon_cp_dispatch_indices( drm_device_t *dev,
1244 drm_radeon_tcl_prim_t *prim )
1246 drm_radeon_private_t *dev_priv = dev->dev_private;
1247 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
1248 int offset = dev_priv->gart_buffers_offset + prim->offset;
1252 int start = prim->start + RADEON_INDEX_PRIM_OFFSET;
1253 int count = (prim->finish - start) / sizeof(u16);
1254 int nbox = sarea_priv->nbox;
1256 DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n",
1264 if (bad_prim_vertex_nr( prim->prim, count )) {
1265 DRM_ERROR( "bad prim %x count %d\n",
1266 prim->prim, count );
1271 if ( start >= prim->finish ||
1272 (prim->start & 0x7) ) {
1273 DRM_ERROR( "buffer prim %d\n", prim->prim );
1277 dwords = (prim->finish - prim->start + 3) / sizeof(u32);
1279 data = (u32 *)((char *)dev->agp_buffer_map->handle +
1280 elt_buf->offset + prim->start);
1282 data[0] = CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, dwords-2 );
1284 data[2] = prim->numverts;
1285 data[3] = prim->vc_format;
1286 data[4] = (prim->prim |
1287 RADEON_PRIM_WALK_IND |
1288 RADEON_COLOR_ORDER_RGBA |
1289 RADEON_VTX_FMT_RADEON_MODE |
1290 (count << RADEON_NUM_VERTICES_SHIFT) );
1294 radeon_emit_clip_rect( dev_priv,
1295 &sarea_priv->boxes[i] );
1297 radeon_cp_dispatch_indirect( dev, elt_buf,
1302 } while ( i < nbox );
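/* Indexed primitives reuse the element buffer itself as an indirect
 * buffer: a RNDR_GEN_INDX_PRIM header is written in place just ahead of
 * the u16 indices (RADEON_INDEX_PRIM_OFFSET bytes into the buffer), and
 * the whole range is then dispatched once per cliprect, with
 * radeon_emit_clip_rect() programming the scissor before each pass.
 */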
1306 #define RADEON_MAX_TEXTURE_SIZE (RADEON_BUFFER_SIZE - 8 * sizeof(u32))
1308 static int radeon_cp_dispatch_texture( DRMFILE filp,
1310 drm_radeon_texture_t *tex,
1311 drm_radeon_tex_image_t *image )
1313 drm_radeon_private_t *dev_priv = dev->dev_private;
1314 drm_file_t *filp_priv;
1318 const u8 __user *data;
1319 int size, dwords, tex_width, blit_width;
1324 DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
1326 if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &tex->offset ) ) {
1327 DRM_ERROR( "Invalid destination offset\n" );
1328 return DRM_ERR( EINVAL );
1331 dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;
1333 /* Flush the pixel cache. This ensures no pixel data gets mixed
1334 * up with the texture data from the host data blit, otherwise
1335 * part of the texture image may be corrupted.
1338 RADEON_FLUSH_CACHE();
1339 RADEON_WAIT_UNTIL_IDLE();
1343 /* The Mesa texture functions provide the data in little endian as the
1344 * chip wants it, but we need to compensate for the fact that the CP
1345 * ring gets byte-swapped
1348 OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT );
1353 /* The compiler won't optimize away a division by a variable,
1354 * even if the only legal values are powers of two. Thus, we'll
1355 * use a shift instead.
1357 switch ( tex->format ) {
1358 case RADEON_TXFORMAT_ARGB8888:
1359 case RADEON_TXFORMAT_RGBA8888:
1360 format = RADEON_COLOR_FORMAT_ARGB8888;
1361 tex_width = tex->width * 4;
1362 blit_width = image->width * 4;
1364 case RADEON_TXFORMAT_AI88:
1365 case RADEON_TXFORMAT_ARGB1555:
1366 case RADEON_TXFORMAT_RGB565:
1367 case RADEON_TXFORMAT_ARGB4444:
1368 case RADEON_TXFORMAT_VYUY422:
1369 case RADEON_TXFORMAT_YVYU422:
1370 format = RADEON_COLOR_FORMAT_RGB565;
1371 tex_width = tex->width * 2;
1372 blit_width = image->width * 2;
1374 case RADEON_TXFORMAT_I8:
1375 case RADEON_TXFORMAT_RGB332:
1376 format = RADEON_COLOR_FORMAT_CI8;
1377 tex_width = tex->width * 1;
1378 blit_width = image->width * 1;
1381 DRM_ERROR( "invalid texture format %d\n", tex->format );
1382 return DRM_ERR(EINVAL);
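/* blit_width is the destination scanline width in bytes: four bytes per
 * texel for the 32-bit formats, two for the 16-bit formats (all blitted
 * as RGB565-sized data) and one for the 8-bit formats.  tex_width is the
 * same calculation applied to the texture's own width and is used below
 * to decide whether each scanline must be padded up to the minimum blit
 * width.
 */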
1385 DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width );
1388 DRM_DEBUG( "tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
1389 tex->offset >> 10, tex->pitch, tex->format,
1390 image->x, image->y, image->width, image->height );
1392 /* Make a copy of some parameters in case we have to
1393 * update them for a multi-pass texture blit.
1395 height = image->height;
1396 data = (const u8 __user *)image->data;
1398 size = height * blit_width;
1400 if ( size > RADEON_MAX_TEXTURE_SIZE ) {
1401 height = RADEON_MAX_TEXTURE_SIZE / blit_width;
1402 size = height * blit_width;
1403 } else if ( size < 4 && size > 0 ) {
1405 } else if ( size == 0 ) {
1409 buf = radeon_freelist_get( dev );
1411 radeon_do_cp_idle( dev_priv );
1412 buf = radeon_freelist_get( dev );
1415 DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n");
1416 DRM_COPY_TO_USER( tex->image, image, sizeof(*image) );
1417 return DRM_ERR(EAGAIN);
1421 /* Dispatch the indirect buffer.
1423 buffer = (u32*)((char*)dev->agp_buffer_map->handle + buf->offset);
1425 buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
1426 buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
1427 RADEON_GMC_BRUSH_NONE |
1429 RADEON_GMC_SRC_DATATYPE_COLOR |
1431 RADEON_DP_SRC_SOURCE_HOST_DATA |
1432 RADEON_GMC_CLR_CMP_CNTL_DIS |
1433 RADEON_GMC_WR_MSK_DIS);
1435 buffer[2] = (tex->pitch << 22) | (tex->offset >> 10);
1436 buffer[3] = 0xffffffff;
1437 buffer[4] = 0xffffffff;
1438 buffer[5] = (image->y << 16) | image->x;
1439 buffer[6] = (height << 16) | image->width;
1443 if ( tex_width >= 32 ) {
1444 /* Texture image width is larger than the minimum, so we
1445 * can upload it directly.
1447 if ( DRM_COPY_FROM_USER( buffer, data,
1448 dwords * sizeof(u32) ) ) {
1449 DRM_ERROR( "EFAULT on data, %d dwords\n",
1451 return DRM_ERR(EFAULT);
1454 /* Texture image width is less than the minimum, so we
1455 		 * need to pad out each image scanline to the minimum blit width.
1458 for ( i = 0 ; i < tex->height ; i++ ) {
1459 if ( DRM_COPY_FROM_USER( buffer, data,
1461 DRM_ERROR( "EFAULT on pad, %d bytes\n",
1463 return DRM_ERR(EFAULT);
1471 buf->used = (dwords + 8) * sizeof(u32);
1472 radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
1473 radeon_cp_discard_buffer( dev, buf );
1475 /* Update the input parameters for next time */
1477 image->height -= height;
1478 image->data = (const u8 __user *)image->data + size;
1479 } while (image->height > 0);
1481 /* Flush the pixel cache after the blit completes. This ensures
1482 	 * the texture data is written out to memory before rendering continues.
1486 RADEON_FLUSH_CACHE();
1487 RADEON_WAIT_UNTIL_2D_IDLE();
1493 static void radeon_cp_dispatch_stipple( drm_device_t *dev, u32 *stipple )
1495 drm_radeon_private_t *dev_priv = dev->dev_private;
1502 OUT_RING( CP_PACKET0( RADEON_RE_STIPPLE_ADDR, 0 ) );
1503 OUT_RING( 0x00000000 );
1505 OUT_RING( CP_PACKET0_TABLE( RADEON_RE_STIPPLE_DATA, 31 ) );
1506 for ( i = 0 ; i < 32 ; i++ ) {
1507 OUT_RING( stipple[i] );
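/* The 32x32 monochrome polygon stipple is loaded by resetting
 * RE_STIPPLE_ADDR to zero and then streaming all 32 pattern dwords
 * through RE_STIPPLE_DATA in one CP_PACKET0_TABLE burst, which keeps the
 * destination register fixed while the data is consumed.
 */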
1514 /* ================================================================
1518 int radeon_cp_clear( DRM_IOCTL_ARGS )
1521 drm_radeon_private_t *dev_priv = dev->dev_private;
1522 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
1523 drm_radeon_clear_t clear;
1524 drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
1527 LOCK_TEST_WITH_RETURN( dev, filp );
1529 DRM_COPY_FROM_USER_IOCTL( clear, (drm_radeon_clear_t __user *)data,
1532 RING_SPACE_TEST_WITH_RETURN( dev_priv );
1534 if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS )
1535 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
1537 if ( DRM_COPY_FROM_USER( &depth_boxes, clear.depth_boxes,
1538 sarea_priv->nbox * sizeof(depth_boxes[0]) ) )
1539 return DRM_ERR(EFAULT);
1541 radeon_cp_dispatch_clear( dev, &clear, depth_boxes );
1548 /* Not sure why this isn't set all the time:
1550 static int radeon_do_init_pageflip( drm_device_t *dev )
1552 drm_radeon_private_t *dev_priv = dev->dev_private;
1558 RADEON_WAIT_UNTIL_3D_IDLE();
1559 OUT_RING( CP_PACKET0( RADEON_CRTC_OFFSET_CNTL, 0 ) );
1560 OUT_RING( RADEON_READ( RADEON_CRTC_OFFSET_CNTL ) | RADEON_CRTC_OFFSET_FLIP_CNTL );
1561 OUT_RING( CP_PACKET0( RADEON_CRTC2_OFFSET_CNTL, 0 ) );
1562 OUT_RING( RADEON_READ( RADEON_CRTC2_OFFSET_CNTL ) | RADEON_CRTC_OFFSET_FLIP_CNTL );
1565 dev_priv->page_flipping = 1;
1566 dev_priv->current_page = 0;
1567 dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;
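/* Page flipping is armed lazily: the first radeon_cp_flip() ioctl calls
 * this to set RADEON_CRTC_OFFSET_FLIP_CNTL on both CRTCs and to reset the
 * bookkeeping to page 0.  After that, every flip simply toggles
 * current_page and rewrites the CRTC offset registers.
 */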
1572 /* Called whenever a client dies, from DRM(release).
1573 * NOTE: Lock isn't necessarily held when this is called!
1575 int radeon_do_cleanup_pageflip( drm_device_t *dev )
1577 drm_radeon_private_t *dev_priv = dev->dev_private;
1580 if (dev_priv->current_page != 0)
1581 radeon_cp_dispatch_flip( dev );
1583 dev_priv->page_flipping = 0;
1587 /* Swapping and flipping are different operations, need different ioctls.
1588 * They can & should be intermixed to support multiple 3d windows.
1590 int radeon_cp_flip( DRM_IOCTL_ARGS )
1593 drm_radeon_private_t *dev_priv = dev->dev_private;
1596 LOCK_TEST_WITH_RETURN( dev, filp );
1598 RING_SPACE_TEST_WITH_RETURN( dev_priv );
1600 if (!dev_priv->page_flipping)
1601 radeon_do_init_pageflip( dev );
1603 radeon_cp_dispatch_flip( dev );
1609 int radeon_cp_swap( DRM_IOCTL_ARGS )
1612 drm_radeon_private_t *dev_priv = dev->dev_private;
1613 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
1616 LOCK_TEST_WITH_RETURN( dev, filp );
1618 RING_SPACE_TEST_WITH_RETURN( dev_priv );
1620 if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS )
1621 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
1623 radeon_cp_dispatch_swap( dev );
1624 dev_priv->sarea_priv->ctx_owner = 0;
1630 int radeon_cp_vertex( DRM_IOCTL_ARGS )
1633 drm_radeon_private_t *dev_priv = dev->dev_private;
1634 drm_file_t *filp_priv;
1635 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
1636 drm_device_dma_t *dma = dev->dma;
1638 drm_radeon_vertex_t vertex;
1639 drm_radeon_tcl_prim_t prim;
1641 LOCK_TEST_WITH_RETURN( dev, filp );
1643 DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
1645 DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex_t __user *)data,
1648 DRM_DEBUG( "pid=%d index=%d count=%d discard=%d\n",
1650 vertex.idx, vertex.count, vertex.discard );
1652 if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) {
1653 DRM_ERROR( "buffer index %d (of %d max)\n",
1654 vertex.idx, dma->buf_count - 1 );
1655 return DRM_ERR(EINVAL);
1657 if ( vertex.prim < 0 ||
1658 vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST ) {
1659 DRM_ERROR( "buffer prim %d\n", vertex.prim );
1660 return DRM_ERR(EINVAL);
1663 RING_SPACE_TEST_WITH_RETURN( dev_priv );
1664 VB_AGE_TEST_WITH_RETURN( dev_priv );
1666 buf = dma->buflist[vertex.idx];
1668 if ( buf->filp != filp ) {
1669 DRM_ERROR( "process %d using buffer owned by %p\n",
1670 DRM_CURRENTPID, buf->filp );
1671 return DRM_ERR(EINVAL);
1673 if ( buf->pending ) {
1674 DRM_ERROR( "sending pending buffer %d\n", vertex.idx );
1675 return DRM_ERR(EINVAL);
1678 /* Build up a prim_t record:
1681 buf->used = vertex.count; /* not used? */
1683 if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) {
1684 if ( radeon_emit_state( dev_priv, filp_priv,
1685 &sarea_priv->context_state,
1686 sarea_priv->tex_state,
1687 sarea_priv->dirty ) ) {
1688 DRM_ERROR( "radeon_emit_state failed\n" );
1689 return DRM_ERR( EINVAL );
1692 sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
1693 RADEON_UPLOAD_TEX1IMAGES |
1694 RADEON_UPLOAD_TEX2IMAGES |
1695 RADEON_REQUIRE_QUIESCENCE);
1699 prim.finish = vertex.count; /* unused */
1700 prim.prim = vertex.prim;
1701 prim.numverts = vertex.count;
1702 prim.vc_format = dev_priv->sarea_priv->vc_format;
1704 radeon_cp_dispatch_vertex( dev, buf, &prim );
1707 if (vertex.discard) {
1708 radeon_cp_discard_buffer( dev, buf );
1715 int radeon_cp_indices( DRM_IOCTL_ARGS )
1718 drm_radeon_private_t *dev_priv = dev->dev_private;
1719 drm_file_t *filp_priv;
1720 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
1721 drm_device_dma_t *dma = dev->dma;
1723 drm_radeon_indices_t elts;
1724 drm_radeon_tcl_prim_t prim;
1727 LOCK_TEST_WITH_RETURN( dev, filp );
1730 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
1731 return DRM_ERR(EINVAL);
1734 DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
1736 DRM_COPY_FROM_USER_IOCTL( elts, (drm_radeon_indices_t __user *)data,
1739 DRM_DEBUG( "pid=%d index=%d start=%d end=%d discard=%d\n",
1741 elts.idx, elts.start, elts.end, elts.discard );
1743 if ( elts.idx < 0 || elts.idx >= dma->buf_count ) {
1744 DRM_ERROR( "buffer index %d (of %d max)\n",
1745 elts.idx, dma->buf_count - 1 );
1746 return DRM_ERR(EINVAL);
1748 if ( elts.prim < 0 ||
1749 elts.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST ) {
1750 DRM_ERROR( "buffer prim %d\n", elts.prim );
1751 return DRM_ERR(EINVAL);
1754 RING_SPACE_TEST_WITH_RETURN( dev_priv );
1755 VB_AGE_TEST_WITH_RETURN( dev_priv );
1757 buf = dma->buflist[elts.idx];
1759 if ( buf->filp != filp ) {
1760 DRM_ERROR( "process %d using buffer owned by %p\n",
1761 DRM_CURRENTPID, buf->filp );
1762 return DRM_ERR(EINVAL);
1764 if ( buf->pending ) {
1765 DRM_ERROR( "sending pending buffer %d\n", elts.idx );
1766 return DRM_ERR(EINVAL);
1769 count = (elts.end - elts.start) / sizeof(u16);
1770 elts.start -= RADEON_INDEX_PRIM_OFFSET;
1772 if ( elts.start & 0x7 ) {
1773 DRM_ERROR( "misaligned buffer 0x%x\n", elts.start );
1774 return DRM_ERR(EINVAL);
1776 if ( elts.start < buf->used ) {
1777 DRM_ERROR( "no header 0x%x - 0x%x\n", elts.start, buf->used );
1778 return DRM_ERR(EINVAL);
1781 buf->used = elts.end;
1783 if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) {
1784 if ( radeon_emit_state( dev_priv, filp_priv,
1785 &sarea_priv->context_state,
1786 sarea_priv->tex_state,
1787 sarea_priv->dirty ) ) {
1788 DRM_ERROR( "radeon_emit_state failed\n" );
1789 return DRM_ERR( EINVAL );
1792 sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
1793 RADEON_UPLOAD_TEX1IMAGES |
1794 RADEON_UPLOAD_TEX2IMAGES |
1795 RADEON_REQUIRE_QUIESCENCE);
1799 /* Build up a prim_t record:
1801 prim.start = elts.start;
1802 prim.finish = elts.end;
1803 prim.prim = elts.prim;
1804 prim.offset = 0; /* offset from start of dma buffers */
1805 prim.numverts = RADEON_MAX_VB_VERTS; /* duh */
1806 prim.vc_format = dev_priv->sarea_priv->vc_format;
1808 radeon_cp_dispatch_indices( dev, buf, &prim );
1810 radeon_cp_discard_buffer( dev, buf );
1817 int radeon_cp_texture( DRM_IOCTL_ARGS )
1820 drm_radeon_private_t *dev_priv = dev->dev_private;
1821 drm_radeon_texture_t tex;
1822 drm_radeon_tex_image_t image;
1825 LOCK_TEST_WITH_RETURN( dev, filp );
1827 DRM_COPY_FROM_USER_IOCTL( tex, (drm_radeon_texture_t __user *)data, sizeof(tex) );
1829 if ( tex.image == NULL ) {
1830 DRM_ERROR( "null texture image!\n" );
1831 return DRM_ERR(EINVAL);
1834 if ( DRM_COPY_FROM_USER( &image,
1835 (drm_radeon_tex_image_t __user *)tex.image,
1837 return DRM_ERR(EFAULT);
1839 RING_SPACE_TEST_WITH_RETURN( dev_priv );
1840 VB_AGE_TEST_WITH_RETURN( dev_priv );
1842 ret = radeon_cp_dispatch_texture( filp, dev, &tex, &image );
1848 int radeon_cp_stipple( DRM_IOCTL_ARGS )
1851 drm_radeon_private_t *dev_priv = dev->dev_private;
1852 drm_radeon_stipple_t stipple;
1855 LOCK_TEST_WITH_RETURN( dev, filp );
1857 DRM_COPY_FROM_USER_IOCTL( stipple, (drm_radeon_stipple_t __user *)data,
1860 if ( DRM_COPY_FROM_USER( &mask, stipple.mask, 32 * sizeof(u32) ) )
1861 return DRM_ERR(EFAULT);
1863 RING_SPACE_TEST_WITH_RETURN( dev_priv );
1865 radeon_cp_dispatch_stipple( dev, mask );
1871 int radeon_cp_indirect( DRM_IOCTL_ARGS )
1874 drm_radeon_private_t *dev_priv = dev->dev_private;
1875 drm_device_dma_t *dma = dev->dma;
1877 drm_radeon_indirect_t indirect;
1880 LOCK_TEST_WITH_RETURN( dev, filp );
1883 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
1884 return DRM_ERR(EINVAL);
1887 DRM_COPY_FROM_USER_IOCTL( indirect, (drm_radeon_indirect_t __user *)data,
1890 DRM_DEBUG( "indirect: idx=%d s=%d e=%d d=%d\n",
1891 indirect.idx, indirect.start,
1892 indirect.end, indirect.discard );
1894 if ( indirect.idx < 0 || indirect.idx >= dma->buf_count ) {
1895 DRM_ERROR( "buffer index %d (of %d max)\n",
1896 indirect.idx, dma->buf_count - 1 );
1897 return DRM_ERR(EINVAL);
1900 buf = dma->buflist[indirect.idx];
1902 if ( buf->filp != filp ) {
1903 DRM_ERROR( "process %d using buffer owned by %p\n",
1904 DRM_CURRENTPID, buf->filp );
1905 return DRM_ERR(EINVAL);
1907 if ( buf->pending ) {
1908 DRM_ERROR( "sending pending buffer %d\n", indirect.idx );
1909 return DRM_ERR(EINVAL);
1912 if ( indirect.start < buf->used ) {
1913 DRM_ERROR( "reusing indirect: start=0x%x actual=0x%x\n",
1914 indirect.start, buf->used );
1915 return DRM_ERR(EINVAL);
1918 RING_SPACE_TEST_WITH_RETURN( dev_priv );
1919 VB_AGE_TEST_WITH_RETURN( dev_priv );
1921 buf->used = indirect.end;
1923 /* Wait for the 3D stream to idle before the indirect buffer
1924 * containing 2D acceleration commands is processed.
1928 RADEON_WAIT_UNTIL_3D_IDLE();
1932 /* Dispatch the indirect buffer full of commands from the
1933 * X server. This is insecure and is thus only available to
1934 * privileged clients.
1936 radeon_cp_dispatch_indirect( dev, buf, indirect.start, indirect.end );
1937 if (indirect.discard) {
1938 radeon_cp_discard_buffer( dev, buf );
1946 int radeon_cp_vertex2( DRM_IOCTL_ARGS )
1949 drm_radeon_private_t *dev_priv = dev->dev_private;
1950 drm_file_t *filp_priv;
1951 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
1952 drm_device_dma_t *dma = dev->dma;
1954 drm_radeon_vertex2_t vertex;
1956 unsigned char laststate;
1958 LOCK_TEST_WITH_RETURN( dev, filp );
1961 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
1962 return DRM_ERR(EINVAL);
1965 DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
1967 DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex2_t __user *)data,
1970 DRM_DEBUG( "pid=%d index=%d discard=%d\n",
1972 vertex.idx, vertex.discard );
1974 if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) {
1975 DRM_ERROR( "buffer index %d (of %d max)\n",
1976 vertex.idx, dma->buf_count - 1 );
1977 return DRM_ERR(EINVAL);
1980 RING_SPACE_TEST_WITH_RETURN( dev_priv );
1981 VB_AGE_TEST_WITH_RETURN( dev_priv );
1983 buf = dma->buflist[vertex.idx];
1985 if ( buf->filp != filp ) {
1986 DRM_ERROR( "process %d using buffer owned by %p\n",
1987 DRM_CURRENTPID, buf->filp );
1988 return DRM_ERR(EINVAL);
1991 if ( buf->pending ) {
1992 DRM_ERROR( "sending pending buffer %d\n", vertex.idx );
1993 return DRM_ERR(EINVAL);
1996 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
1997 return DRM_ERR(EINVAL);
1999 for (laststate = 0xff, i = 0 ; i < vertex.nr_prims ; i++) {
2000 drm_radeon_prim_t prim;
2001 drm_radeon_tcl_prim_t tclprim;
2003 if ( DRM_COPY_FROM_USER( &prim, &vertex.prim[i], sizeof(prim) ) )
2004 return DRM_ERR(EFAULT);
2006 if ( prim.stateidx != laststate ) {
2007 drm_radeon_state_t state;
2009 if ( DRM_COPY_FROM_USER( &state,
2010 &vertex.state[prim.stateidx],
2012 return DRM_ERR(EFAULT);
2014 if ( radeon_emit_state2( dev_priv, filp_priv, &state ) ) {
2015 DRM_ERROR( "radeon_emit_state2 failed\n" );
2016 return DRM_ERR( EINVAL );
2019 laststate = prim.stateidx;
2022 tclprim.start = prim.start;
2023 tclprim.finish = prim.finish;
2024 tclprim.prim = prim.prim;
2025 tclprim.vc_format = prim.vc_format;
2027 if ( prim.prim & RADEON_PRIM_WALK_IND ) {
2028 tclprim.offset = prim.numverts * 64;
2029 tclprim.numverts = RADEON_MAX_VB_VERTS; /* duh */
2031 radeon_cp_dispatch_indices( dev, buf, &tclprim );
2033 tclprim.numverts = prim.numverts;
2034 tclprim.offset = 0; /* not used */
2036 radeon_cp_dispatch_vertex( dev, buf, &tclprim );
2039 if (sarea_priv->nbox == 1)
2040 sarea_priv->nbox = 0;
2043 if ( vertex.discard ) {
2044 radeon_cp_discard_buffer( dev, buf );
2052 static int radeon_emit_packets(
2053 drm_radeon_private_t *dev_priv,
2054 drm_file_t *filp_priv,
2055 drm_radeon_cmd_header_t header,
2056 drm_radeon_cmd_buffer_t *cmdbuf )
2058 int id = (int)header.packet.packet_id;
2060 int *data = (int *)cmdbuf->buf;
2063 if (id >= RADEON_MAX_STATE_PACKETS)
2064 return DRM_ERR(EINVAL);
2066 sz = packet[id].len;
2067 reg = packet[id].start;
2069 if (sz * sizeof(int) > cmdbuf->bufsz) {
2070 DRM_ERROR( "Packet size provided larger than data provided\n" );
2071 return DRM_ERR(EINVAL);
2074 if ( radeon_check_and_fixup_packets( dev_priv, filp_priv, id, data ) ) {
2075 DRM_ERROR( "Packet verification failed\n" );
2076 return DRM_ERR( EINVAL );
2080 OUT_RING( CP_PACKET0( reg, (sz-1) ) );
2081 OUT_RING_TABLE( data, sz );
2084 cmdbuf->buf += sz * sizeof(int);
2085 cmdbuf->bufsz -= sz * sizeof(int);
2089 static __inline__ int radeon_emit_scalars(
2090 drm_radeon_private_t *dev_priv,
2091 drm_radeon_cmd_header_t header,
2092 drm_radeon_cmd_buffer_t *cmdbuf )
2094 int sz = header.scalars.count;
2095 int start = header.scalars.offset;
2096 int stride = header.scalars.stride;
2100 OUT_RING( CP_PACKET0( RADEON_SE_TCL_SCALAR_INDX_REG, 0 ) );
2101 OUT_RING( start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
2102 OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_SCALAR_DATA_REG, sz-1 ) );
2103 OUT_RING_TABLE( cmdbuf->buf, sz );
2105 cmdbuf->buf += sz * sizeof(int);
2106 cmdbuf->bufsz -= sz * sizeof(int);
2112 static __inline__ int radeon_emit_scalars2(
2113 drm_radeon_private_t *dev_priv,
2114 drm_radeon_cmd_header_t header,
2115 drm_radeon_cmd_buffer_t *cmdbuf )
2117 int sz = header.scalars.count;
2118 int start = ((unsigned int)header.scalars.offset) + 0x100;
2119 int stride = header.scalars.stride;
2123 OUT_RING( CP_PACKET0( RADEON_SE_TCL_SCALAR_INDX_REG, 0 ) );
2124 OUT_RING( start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
2125 OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_SCALAR_DATA_REG, sz-1 ) );
2126 OUT_RING_TABLE( cmdbuf->buf, sz );
2128 cmdbuf->buf += sz * sizeof(int);
2129 cmdbuf->bufsz -= sz * sizeof(int);
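/* radeon_emit_scalars2() is identical to radeon_emit_scalars() apart from
 * the 0x100 bias added to the table offset.  The split into two command
 * types presumably exists because the command header's offset field is
 * too narrow to reach the higher scalar slots directly (assumption: the
 * header struct is not shown here).
 */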
2133 static __inline__ int radeon_emit_vectors(
2134 drm_radeon_private_t *dev_priv,
2135 drm_radeon_cmd_header_t header,
2136 drm_radeon_cmd_buffer_t *cmdbuf )
2138 int sz = header.vectors.count;
2139 int start = header.vectors.offset;
2140 int stride = header.vectors.stride;
2144 OUT_RING( CP_PACKET0( RADEON_SE_TCL_VECTOR_INDX_REG, 0 ) );
2145 OUT_RING( start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
2146 OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_VECTOR_DATA_REG, (sz-1) ) );
2147 OUT_RING_TABLE( cmdbuf->buf, sz );
2150 cmdbuf->buf += sz * sizeof(int);
2151 cmdbuf->bufsz -= sz * sizeof(int);
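/* Scalars and vectors share the same indexed-register pattern: write
 * start | (stride << shift) to the TCL *_INDX_REG, then stream the payload
 * through the matching *_DATA_REG.  Vectors use the octword stride shift
 * where scalars use the dword one.
 */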
2156 static int radeon_emit_packet3( drm_device_t *dev,
2157 drm_file_t *filp_priv,
2158 drm_radeon_cmd_buffer_t *cmdbuf )
2160 drm_radeon_private_t *dev_priv = dev->dev_private;
2167 if ( ( ret = radeon_check_and_fixup_packet3( dev_priv, filp_priv,
2168 cmdbuf, &cmdsz ) ) ) {
2169 DRM_ERROR( "Packet verification failed\n" );
2173 BEGIN_RING( cmdsz );
2174 OUT_RING_TABLE( cmdbuf->buf, cmdsz );
2177 cmdbuf->buf += cmdsz * 4;
2178 cmdbuf->bufsz -= cmdsz * 4;
static int radeon_emit_packet3_cliprect( drm_device_t *dev,
					 drm_file_t *filp_priv,
					 drm_radeon_cmd_buffer_t *cmdbuf,
					 int orig_nbox )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t box;
	unsigned int cmdsz;
	int ret;
	drm_clip_rect_t __user *boxes = cmdbuf->boxes;
	int i = 0;
	RING_LOCALS;

	if ( ( ret = radeon_check_and_fixup_packet3( dev_priv, filp_priv,
						     cmdbuf, &cmdsz ) ) ) {
		DRM_ERROR( "Packet verification failed\n" );
		return ret;
	}

	if (!orig_nbox)
		goto out;

	do {
		if ( i < cmdbuf->nbox ) {
			if (DRM_COPY_FROM_USER( &box, &boxes[i], sizeof(box) ))
				return DRM_ERR(EFAULT);
			/* FIXME The second and subsequent times round
			 * this loop, send a WAIT_UNTIL_3D_IDLE before
			 * calling emit_clip_rect(). This fixes a
			 * lockup on fast machines when sending
			 * several cliprects with a cmdbuf, as when
			 * waving a 2D window over a 3D
			 * window. Something in the commands from user
			 * space seems to hang the card when they're
			 * sent several times in a row. That would be
			 * the correct place to fix it but this works
			 * around it until I can figure that out - Tim
			 * Smith */
			if ( i ) {
				BEGIN_RING( 2 );
				RADEON_WAIT_UNTIL_3D_IDLE();
				ADVANCE_RING();
			}
			radeon_emit_clip_rect( dev_priv, &box );
		}

		BEGIN_RING( cmdsz );
		OUT_RING_TABLE( cmdbuf->buf, cmdsz );
		ADVANCE_RING();

	} while ( ++i < cmdbuf->nbox );
	if (cmdbuf->nbox == 1)
		cmdbuf->nbox = 0;

 out:
	cmdbuf->buf += cmdsz * 4;
	cmdbuf->bufsz -= cmdsz * 4;
	return 0;
}
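
/* Emit an engine-idle wait on behalf of the client; flags select the 2D
 * engine, the 3D engine, or both.
 */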
static int radeon_emit_wait( drm_device_t *dev, int flags )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG("%s: %x\n", __FUNCTION__, flags);
	switch (flags) {
	case RADEON_WAIT_2D:
		BEGIN_RING( 2 );
		RADEON_WAIT_UNTIL_2D_IDLE();
		ADVANCE_RING();
		break;
	case RADEON_WAIT_3D:
		BEGIN_RING( 2 );
		RADEON_WAIT_UNTIL_3D_IDLE();
		ADVANCE_RING();
		break;
	case RADEON_WAIT_2D|RADEON_WAIT_3D:
		BEGIN_RING( 2 );
		RADEON_WAIT_UNTIL_IDLE();
		ADVANCE_RING();
		break;
	default:
		return DRM_ERR(EINVAL);
	}

	return 0;
}
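
/* Command buffer ioctl: parse and emit a client-supplied stream of commands.
 * Each command is a 32-bit drm_radeon_cmd_header_t immediately followed by
 * its payload, roughly (illustrative layout only):
 *
 *   | header: cmd_type = RADEON_CMD_SCALARS  | count dwords of scalar data |
 *   | header: cmd_type = RADEON_CMD_PACKET3  | verified packet3 dwords     |
 *   | header: cmd_type = RADEON_CMD_WAIT     | (no payload)                |
 */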
int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_file_t *filp_priv;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf = NULL;
	int idx;
	drm_radeon_cmd_buffer_t cmdbuf;
	drm_radeon_cmd_header_t header;
	int orig_nbox, orig_bufsz;
	char *kbuf = NULL;

	LOCK_TEST_WITH_RETURN( dev, filp );

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}

	DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );

	DRM_COPY_FROM_USER_IOCTL( cmdbuf, (drm_radeon_cmd_buffer_t __user *)data,
				  sizeof(cmdbuf) );

	RING_SPACE_TEST_WITH_RETURN( dev_priv );
	VB_AGE_TEST_WITH_RETURN( dev_priv );

	if (cmdbuf.bufsz > 64*1024 || cmdbuf.bufsz < 0) {
		return DRM_ERR(EINVAL);
	}

	/* Allocate an in-kernel area and copy in the cmdbuf.  Do this to avoid
	 * races between checking values and using those values in other code,
	 * and simply to avoid a lot of function calls to copy in data.
	 */
	orig_bufsz = cmdbuf.bufsz;
	if (orig_bufsz != 0) {
		kbuf = kmalloc(cmdbuf.bufsz, GFP_KERNEL);
		if (kbuf == NULL)
			return DRM_ERR(ENOMEM);
		if (DRM_COPY_FROM_USER(kbuf, cmdbuf.buf, cmdbuf.bufsz)) {
			kfree(kbuf);
			return DRM_ERR(EFAULT);
		}
		cmdbuf.buf = kbuf;
	}

	orig_nbox = cmdbuf.nbox;

	while ( cmdbuf.bufsz >= sizeof(header) ) {

		header.i = *(int *)cmdbuf.buf;
		cmdbuf.buf += sizeof(header);
		cmdbuf.bufsz -= sizeof(header);

		switch (header.header.cmd_type) {
		case RADEON_CMD_PACKET:
			DRM_DEBUG("RADEON_CMD_PACKET\n");
			if (radeon_emit_packets( dev_priv, filp_priv, header, &cmdbuf )) {
				DRM_ERROR("radeon_emit_packets failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_SCALARS:
			DRM_DEBUG("RADEON_CMD_SCALARS\n");
			if (radeon_emit_scalars( dev_priv, header, &cmdbuf )) {
				DRM_ERROR("radeon_emit_scalars failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_VECTORS:
			DRM_DEBUG("RADEON_CMD_VECTORS\n");
			if (radeon_emit_vectors( dev_priv, header, &cmdbuf )) {
				DRM_ERROR("radeon_emit_vectors failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_DMA_DISCARD:
			DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
			idx = header.dma.buf_idx;
			if ( idx < 0 || idx >= dma->buf_count ) {
				DRM_ERROR( "buffer index %d (of %d max)\n",
					   idx, dma->buf_count - 1 );
				goto err;
			}

			buf = dma->buflist[idx];
			if ( buf->filp != filp || buf->pending ) {
				DRM_ERROR( "bad buffer %p %p %d\n",
					   buf->filp, filp, buf->pending );
				goto err;
			}

			radeon_cp_discard_buffer( dev, buf );
			break;

		case RADEON_CMD_PACKET3:
			DRM_DEBUG("RADEON_CMD_PACKET3\n");
			if (radeon_emit_packet3( dev, filp_priv, &cmdbuf )) {
				DRM_ERROR("radeon_emit_packet3 failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_PACKET3_CLIP:
			DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
			if (radeon_emit_packet3_cliprect( dev, filp_priv, &cmdbuf, orig_nbox )) {
				DRM_ERROR("radeon_emit_packet3_clip failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_SCALARS2:
			DRM_DEBUG("RADEON_CMD_SCALARS2\n");
			if (radeon_emit_scalars2( dev_priv, header, &cmdbuf )) {
				DRM_ERROR("radeon_emit_scalars2 failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_WAIT:
			DRM_DEBUG("RADEON_CMD_WAIT\n");
			if (radeon_emit_wait( dev, header.wait.flags )) {
				DRM_ERROR("radeon_emit_wait failed\n");
				goto err;
			}
			break;

		default:
			DRM_ERROR("bad cmd_type %d at %p\n",
				  header.header.cmd_type,
				  cmdbuf.buf - sizeof(header));
			goto err;
		}
	}

	if (orig_bufsz != 0)
		kfree(kbuf);

	DRM_DEBUG("DONE\n");
	COMMIT_RING();
	return 0;

err:
	if (orig_bufsz != 0)
		kfree(kbuf);
	return DRM_ERR(EINVAL);
}
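
/* getparam ioctl: report a single int-sized driver value back to the client.
 */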
int radeon_cp_getparam( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_getparam_t param;
	int value;

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL( param, (drm_radeon_getparam_t __user *)data,
				  sizeof(param) );

	DRM_DEBUG( "pid=%d\n", DRM_CURRENTPID );

	switch( param.param ) {
	case RADEON_PARAM_GART_BUFFER_OFFSET:
		value = dev_priv->gart_buffers_offset;
		break;
	case RADEON_PARAM_LAST_FRAME:
		dev_priv->stats.last_frame_reads++;
		value = GET_SCRATCH( 0 );
		break;
	case RADEON_PARAM_LAST_DISPATCH:
		value = GET_SCRATCH( 1 );
		break;
	case RADEON_PARAM_LAST_CLEAR:
		dev_priv->stats.last_clear_reads++;
		value = GET_SCRATCH( 2 );
		break;
	case RADEON_PARAM_IRQ_NR:
		value = dev->irq;
		break;
	case RADEON_PARAM_GART_BASE:
		value = dev_priv->gart_vm_start;
		break;
	case RADEON_PARAM_REGISTER_HANDLE:
		value = dev_priv->mmio_offset;
		break;
	case RADEON_PARAM_STATUS_HANDLE:
		value = dev_priv->ring_rptr_offset;
		break;
#if BITS_PER_LONG == 32
	/*
	 * This ioctl() doesn't work on 64-bit platforms because hw_lock is a
	 * pointer which can't fit into an int-sized variable.  According to
	 * Michel Dänzer, the ioctl() is only used on embedded platforms, so
	 * not supporting it shouldn't be a problem.  If the same functionality
	 * is needed on 64-bit platforms, a new ioctl() would have to be added,
	 * so backwards-compatibility for the embedded platforms can be
	 * maintained.  --davidm 4-Feb-2004.
	 */
	case RADEON_PARAM_SAREA_HANDLE:
		/* The lock is the first dword in the sarea. */
		value = (long)dev->lock.hw_lock;
		break;
#endif
	case RADEON_PARAM_GART_TEX_HANDLE:
		value = dev_priv->gart_textures_offset;
		break;
	default:
		return DRM_ERR(EINVAL);
	}

	if ( DRM_COPY_TO_USER( param.value, &value, sizeof(int) ) ) {
		DRM_ERROR( "copy_to_user\n" );
		return DRM_ERR(EFAULT);
	}

	return 0;
}
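
/* setparam ioctl: currently only RADEON_SETPARAM_FB_LOCATION, which records
 * the difference between the kernel's and the client's idea of the
 * framebuffer location so that client offsets can be fixed up later.
 */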
int radeon_cp_setparam( DRM_IOCTL_ARGS ) {
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_file_t *filp_priv;
	drm_radeon_setparam_t sp;
	struct drm_radeon_driver_file_fields *radeon_priv;

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR( EINVAL );
	}

	DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );

	DRM_COPY_FROM_USER_IOCTL( sp, ( drm_radeon_setparam_t __user * )data,
				  sizeof( sp ) );

	switch( sp.param ) {
	case RADEON_SETPARAM_FB_LOCATION:
		radeon_priv = filp_priv->driver_priv;
		radeon_priv->radeon_fb_delta = dev_priv->fb_location - sp.value;
		break;
	default:
		DRM_DEBUG( "Invalid parameter %d\n", sp.param );
		return DRM_ERR( EINVAL );
	}

	return 0;
}

/* When a client dies:
 *    - Check for and clean up flipped page state
 *    - Free any allocated GART memory.
 *
 * DRM infrastructure takes care of reclaiming dma buffers.
 */
static void radeon_driver_prerelease(drm_device_t *dev, DRMFILE filp)
{
	if ( dev->dev_private ) {
		drm_radeon_private_t *dev_priv = dev->dev_private;
		if ( dev_priv->page_flipping ) {
			radeon_do_cleanup_pageflip( dev );
		}
		radeon_mem_release( filp, dev_priv->gart_heap );
		radeon_mem_release( filp, dev_priv->fb_heap );
	}
}

static void radeon_driver_pretakedown(drm_device_t *dev)
{
	radeon_do_release(dev);
}
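
/* Per-file-descriptor setup: allocate the driver-private block and seed the
 * framebuffer delta from the current fb_location.
 */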
static int radeon_driver_open_helper(drm_device_t *dev, drm_file_t *filp_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_driver_file_fields *radeon_priv;

	radeon_priv = (struct drm_radeon_driver_file_fields *)DRM(alloc)(sizeof(*radeon_priv), DRM_MEM_FILES);
	if (!radeon_priv)
		return -ENOMEM;

	filp_priv->driver_priv = radeon_priv;
	if ( dev_priv )
		radeon_priv->radeon_fb_delta = dev_priv->fb_location;
	else
		radeon_priv->radeon_fb_delta = 0;
	return 0;
}

static void radeon_driver_free_filp_priv(drm_device_t *dev, drm_file_t *filp_priv)
{
	struct drm_radeon_driver_file_fields *radeon_priv = filp_priv->driver_priv;

	DRM(free)(radeon_priv, sizeof(*radeon_priv), DRM_MEM_FILES);
}
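
/* Hook the radeon entry points and feature flags into the DRM core's
 * driver function table.
 */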
void radeon_driver_register_fns(struct drm_device *dev)
{
	dev->driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL;
	dev->dev_priv_size = sizeof(drm_radeon_buf_priv_t);
	dev->fn_tbl.prerelease = radeon_driver_prerelease;
	dev->fn_tbl.pretakedown = radeon_driver_pretakedown;
	dev->fn_tbl.open_helper = radeon_driver_open_helper;
	dev->fn_tbl.free_filp_priv = radeon_driver_free_filp_priv;
	dev->fn_tbl.vblank_wait = radeon_driver_vblank_wait;
	dev->fn_tbl.irq_preinstall = radeon_driver_irq_preinstall;
	dev->fn_tbl.irq_postinstall = radeon_driver_irq_postinstall;
	dev->fn_tbl.irq_uninstall = radeon_driver_irq_uninstall;
	dev->fn_tbl.irq_handler = radeon_driver_irq_handler;
}