VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 10
-EXTRAVERSION = -1.14_FC2.1.planetlab
-NAME=AC 1
+EXTRAVERSION = -1.771_FC2.1.planetlab
+NAME=AC
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.10-ac12
-# Tue Feb 15 15:58:28 2005
+# Wed May 18 16:34:48 2005
#
CONFIG_X86=y
CONFIG_MMU=y
__copy_to_user(arg1, arg2, arg3)
#define DRM_GET_USER_UNCHECKED(val, uaddr) \
__get_user(val, uaddr)
-#define DRM_PUT_USER_UNCHECKED(uaddr, val) \
- __put_user(val, uaddr)
-
/** 'malloc' without the overhead of DRM(alloc)() */
#define DRM_MALLOC(x) kmalloc(x, GFP_KERNEL)
} while (0)
-#define OUT_RING_USER_TABLE( tab, sz ) do { \
+#define OUT_RING_TABLE( tab, sz ) do { \
int _size = (sz); \
- int __user *_tab = (tab); \
+ int *_tab = (int *)(tab); \
\
if (write + _size > mask) { \
- int i = (mask+1) - write; \
- if (DRM_COPY_FROM_USER_UNCHECKED( (int *)(ring+write), \
- _tab, i*4 )) \
- return DRM_ERR(EFAULT); \
+ int _i = (mask+1) - write; \
+ _size -= _i; \
+		while (_i > 0) { \
+ *(int *)(ring + write) = *_tab++; \
+ write++; \
+ _i--; \
+ } \
write = 0; \
- _size -= i; \
- _tab += i; \
} \
\
- if (_size && DRM_COPY_FROM_USER_UNCHECKED( (int *)(ring+write), \
- _tab, _size*4 )) \
- return DRM_ERR(EFAULT); \
- \
- write += _size; \
+ while (_size > 0) { \
+ *(ring + write) = *_tab++; \
+ write++; \
+ _size--; \
+ } \
write &= mask; \
} while (0)
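/* With the command stream now copied into a kernel buffer before parsing
 * (see the cmdbuf ioctl below), this macro reads from kernel memory: no
 * per-dword user-space access, and no window for user space to rewrite
 * dwords after they have been validated. On wrap, the first loop fills
 * the ring up to the mask and the second continues from slot zero. */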
return 0;
}
-static __inline__ int radeon_check_and_fixup_offset_user( drm_radeon_private_t *dev_priv,
- drm_file_t *filp_priv,
- u32 __user *offset ) {
- u32 off;
-
- DRM_GET_USER_UNCHECKED( off, offset );
-
- if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &off ) )
- return DRM_ERR( EINVAL );
-
- DRM_PUT_USER_UNCHECKED( offset, off );
-
- return 0;
-}
-
static __inline__ int radeon_check_and_fixup_packets( drm_radeon_private_t *dev_priv,
drm_file_t *filp_priv,
int id,
switch ( id ) {
case RADEON_EMIT_PP_MISC:
- if ( radeon_check_and_fixup_offset_user( dev_priv, filp_priv,
- &data[( RADEON_RB3D_DEPTHOFFSET
- - RADEON_PP_MISC ) / 4] ) ) {
+ if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
+ &data[( RADEON_RB3D_DEPTHOFFSET
+ - RADEON_PP_MISC ) / 4] ) ) {
DRM_ERROR( "Invalid depth buffer offset\n" );
return DRM_ERR( EINVAL );
}
break;
case RADEON_EMIT_PP_CNTL:
- if ( radeon_check_and_fixup_offset_user( dev_priv, filp_priv,
- &data[( RADEON_RB3D_COLOROFFSET
- - RADEON_PP_CNTL ) / 4] ) ) {
+ if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
+ &data[( RADEON_RB3D_COLOROFFSET
+ - RADEON_PP_CNTL ) / 4] ) ) {
DRM_ERROR( "Invalid colour buffer offset\n" );
return DRM_ERR( EINVAL );
}
case R200_EMIT_PP_TXOFFSET_3:
case R200_EMIT_PP_TXOFFSET_4:
case R200_EMIT_PP_TXOFFSET_5:
- if ( radeon_check_and_fixup_offset_user( dev_priv, filp_priv,
- &data[0] ) ) {
+ if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
+ &data[0] ) ) {
DRM_ERROR( "Invalid R200 texture offset\n" );
return DRM_ERR( EINVAL );
}
case RADEON_EMIT_PP_TXFILTER_0:
case RADEON_EMIT_PP_TXFILTER_1:
case RADEON_EMIT_PP_TXFILTER_2:
- if ( radeon_check_and_fixup_offset_user( dev_priv, filp_priv,
- &data[( RADEON_PP_TXOFFSET_0
- - RADEON_PP_TXFILTER_0 ) / 4] ) ) {
+ if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
+ &data[( RADEON_PP_TXOFFSET_0
+ - RADEON_PP_TXFILTER_0 ) / 4] ) ) {
DRM_ERROR( "Invalid R100 texture offset\n" );
return DRM_ERR( EINVAL );
}
case R200_EMIT_PP_CUBIC_OFFSETS_5: {
int i;
for ( i = 0; i < 5; i++ ) {
- if ( radeon_check_and_fixup_offset_user( dev_priv,
- filp_priv,
- &data[i] ) ) {
+ if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
+ &data[i] ) ) {
DRM_ERROR( "Invalid R200 cubic texture offset\n" );
return DRM_ERR( EINVAL );
}
drm_file_t *filp_priv,
drm_radeon_cmd_buffer_t *cmdbuf,
unsigned int *cmdsz ) {
- u32 tmp[4];
- u32 __user *cmd = (u32 __user *)cmdbuf->buf;
-
- if ( DRM_COPY_FROM_USER_UNCHECKED( tmp, cmd, sizeof( tmp ) ) ) {
- DRM_ERROR( "Failed to copy data from user space\n" );
- return DRM_ERR( EFAULT );
- }
+ u32 *cmd = (u32 *) cmdbuf->buf;
- *cmdsz = 2 + ( ( tmp[0] & RADEON_CP_PACKET_COUNT_MASK ) >> 16 );
+ *cmdsz = 2 + ( ( cmd[0] & RADEON_CP_PACKET_COUNT_MASK ) >> 16 );
- if ( ( tmp[0] & 0xc0000000 ) != RADEON_CP_PACKET3 ) {
+ if ( ( cmd[0] & 0xc0000000 ) != RADEON_CP_PACKET3 ) {
DRM_ERROR( "Not a type 3 packet\n" );
return DRM_ERR( EINVAL );
}
}
/* Check client state and fix it up if necessary */
- if ( tmp[0] & 0x8000 ) { /* MSB of opcode: next DWORD GUI_CNTL */
+ if ( cmd[0] & 0x8000 ) { /* MSB of opcode: next DWORD GUI_CNTL */
u32 offset;
- if ( tmp[1] & ( RADEON_GMC_SRC_PITCH_OFFSET_CNTL
+ if ( cmd[1] & ( RADEON_GMC_SRC_PITCH_OFFSET_CNTL
| RADEON_GMC_DST_PITCH_OFFSET_CNTL ) ) {
- offset = tmp[2] << 10;
+ offset = cmd[2] << 10;
if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &offset ) ) {
DRM_ERROR( "Invalid first packet offset\n" );
return DRM_ERR( EINVAL );
}
- tmp[2] = ( tmp[2] & 0xffc00000 ) | offset >> 10;
+ cmd[2] = ( cmd[2] & 0xffc00000 ) | offset >> 10;
}
- if ( ( tmp[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL ) &&
- ( tmp[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL ) ) {
- offset = tmp[3] << 10;
+ if ( ( cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL ) &&
+ ( cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL ) ) {
+ offset = cmd[3] << 10;
if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &offset ) ) {
DRM_ERROR( "Invalid second packet offset\n" );
return DRM_ERR( EINVAL );
}
- tmp[3] = ( tmp[3] & 0xffc00000 ) | offset >> 10;
- }
-
- if ( DRM_COPY_TO_USER_UNCHECKED( cmd, tmp, sizeof( tmp ) ) ) {
- DRM_ERROR( "Failed to copy data to user space\n" );
- return DRM_ERR( EFAULT );
+ cmd[3] = ( cmd[3] & 0xffc00000 ) | offset >> 10;
}
}
{
int id = (int)header.packet.packet_id;
int sz, reg;
- int __user *data = (int __user *)cmdbuf->buf;
+ int *data = (int *)cmdbuf->buf;
RING_LOCALS;
if (id >= RADEON_MAX_STATE_PACKETS)
BEGIN_RING(sz+1);
OUT_RING( CP_PACKET0( reg, (sz-1) ) );
- OUT_RING_USER_TABLE( data, sz );
+ OUT_RING_TABLE( data, sz );
ADVANCE_RING();
cmdbuf->buf += sz * sizeof(int);
drm_radeon_cmd_buffer_t *cmdbuf )
{
int sz = header.scalars.count;
- int __user *data = (int __user *)cmdbuf->buf;
int start = header.scalars.offset;
int stride = header.scalars.stride;
RING_LOCALS;
OUT_RING( CP_PACKET0( RADEON_SE_TCL_SCALAR_INDX_REG, 0 ) );
OUT_RING( start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_SCALAR_DATA_REG, sz-1 ) );
- OUT_RING_USER_TABLE( data, sz );
+ OUT_RING_TABLE( cmdbuf->buf, sz );
ADVANCE_RING();
cmdbuf->buf += sz * sizeof(int);
cmdbuf->bufsz -= sz * sizeof(int);
drm_radeon_cmd_buffer_t *cmdbuf )
{
int sz = header.scalars.count;
- int __user *data = (int __user *)cmdbuf->buf;
int start = ((unsigned int)header.scalars.offset) + 0x100;
int stride = header.scalars.stride;
RING_LOCALS;
OUT_RING( CP_PACKET0( RADEON_SE_TCL_SCALAR_INDX_REG, 0 ) );
OUT_RING( start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_SCALAR_DATA_REG, sz-1 ) );
- OUT_RING_USER_TABLE( data, sz );
+ OUT_RING_TABLE( cmdbuf->buf, sz );
ADVANCE_RING();
cmdbuf->buf += sz * sizeof(int);
cmdbuf->bufsz -= sz * sizeof(int);
drm_radeon_cmd_buffer_t *cmdbuf )
{
int sz = header.vectors.count;
- int __user *data = (int __user *)cmdbuf->buf;
int start = header.vectors.offset;
int stride = header.vectors.stride;
RING_LOCALS;
OUT_RING( CP_PACKET0( RADEON_SE_TCL_VECTOR_INDX_REG, 0 ) );
OUT_RING( start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_VECTOR_DATA_REG, (sz-1) ) );
- OUT_RING_USER_TABLE( data, sz );
+ OUT_RING_TABLE( cmdbuf->buf, sz );
ADVANCE_RING();
cmdbuf->buf += sz * sizeof(int);
{
drm_radeon_private_t *dev_priv = dev->dev_private;
unsigned int cmdsz;
- int __user *cmd = (int __user *)cmdbuf->buf;
int ret;
RING_LOCALS;
}
BEGIN_RING( cmdsz );
- OUT_RING_USER_TABLE( cmd, cmdsz );
+ OUT_RING_TABLE( cmdbuf->buf, cmdsz );
ADVANCE_RING();
cmdbuf->buf += cmdsz * 4;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_clip_rect_t box;
unsigned int cmdsz;
- int __user *cmd = (int __user *)cmdbuf->buf;
int ret;
drm_clip_rect_t __user *boxes = cmdbuf->boxes;
int i = 0;
do {
if ( i < cmdbuf->nbox ) {
- if (DRM_COPY_FROM_USER_UNCHECKED( &box, &boxes[i], sizeof(box) ))
+ if (DRM_COPY_FROM_USER( &box, &boxes[i], sizeof(box) ))
return DRM_ERR(EFAULT);
/* FIXME The second and subsequent times round
* this loop, send a WAIT_UNTIL_3D_IDLE before
}
BEGIN_RING( cmdsz );
- OUT_RING_USER_TABLE( cmd, cmdsz );
+ OUT_RING_TABLE( cmdbuf->buf, cmdsz );
ADVANCE_RING();
} while ( ++i < cmdbuf->nbox );
int idx;
drm_radeon_cmd_buffer_t cmdbuf;
drm_radeon_cmd_header_t header;
- int orig_nbox;
+ int orig_nbox, orig_bufsz;
+	char *kbuf = NULL;
LOCK_TEST_WITH_RETURN( dev, filp );
RING_SPACE_TEST_WITH_RETURN( dev_priv );
VB_AGE_TEST_WITH_RETURN( dev_priv );
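+	/* bufsz comes straight from user space and sizes the kmalloc()
+	 * below, so bound it before trusting it. */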
+	if (cmdbuf.bufsz > 64*1024 || cmdbuf.bufsz < 0) {
+ return DRM_ERR(EINVAL);
+ }
- if (DRM_VERIFYAREA_READ( cmdbuf.buf, cmdbuf.bufsz ))
- return DRM_ERR(EFAULT);
-
- if (cmdbuf.nbox &&
- DRM_VERIFYAREA_READ(cmdbuf.boxes,
- cmdbuf.nbox * sizeof(drm_clip_rect_t)))
- return DRM_ERR(EFAULT);
+ /* Allocate an in-kernel area and copy in the cmdbuf. Do this to avoid
+ * races between checking values and using those values in other code,
+ * and simply to avoid a lot of function calls to copy in data.
+ */
+ orig_bufsz = cmdbuf.bufsz;
+ if (orig_bufsz != 0) {
+ kbuf = kmalloc(cmdbuf.bufsz, GFP_KERNEL);
+ if (kbuf == NULL)
+ return DRM_ERR(ENOMEM);
+		if (DRM_COPY_FROM_USER(kbuf, cmdbuf.buf, cmdbuf.bufsz)) {
+			kfree(kbuf);
+			return DRM_ERR(EFAULT);
+		}
+ cmdbuf.buf = kbuf;
+ }
orig_nbox = cmdbuf.nbox;
while ( cmdbuf.bufsz >= sizeof(header) ) {
-
- if (DRM_GET_USER_UNCHECKED( header.i, (int __user *)cmdbuf.buf )) {
- DRM_ERROR("__get_user %p\n", cmdbuf.buf);
- return DRM_ERR(EFAULT);
- }
+ header.i = *(int *)cmdbuf.buf;
cmdbuf.buf += sizeof(header);
cmdbuf.bufsz -= sizeof(header);
DRM_DEBUG("RADEON_CMD_PACKET\n");
if (radeon_emit_packets( dev_priv, filp_priv, header, &cmdbuf )) {
DRM_ERROR("radeon_emit_packets failed\n");
- return DRM_ERR(EINVAL);
+ goto err;
}
break;
DRM_DEBUG("RADEON_CMD_SCALARS\n");
if (radeon_emit_scalars( dev_priv, header, &cmdbuf )) {
DRM_ERROR("radeon_emit_scalars failed\n");
- return DRM_ERR(EINVAL);
+ goto err;
}
break;
DRM_DEBUG("RADEON_CMD_VECTORS\n");
if (radeon_emit_vectors( dev_priv, header, &cmdbuf )) {
DRM_ERROR("radeon_emit_vectors failed\n");
- return DRM_ERR(EINVAL);
+ goto err;
}
break;
if ( idx < 0 || idx >= dma->buf_count ) {
DRM_ERROR( "buffer index %d (of %d max)\n",
idx, dma->buf_count - 1 );
- return DRM_ERR(EINVAL);
+ goto err;
}
buf = dma->buflist[idx];
if ( buf->filp != filp || buf->pending ) {
DRM_ERROR( "bad buffer %p %p %d\n",
buf->filp, filp, buf->pending);
- return DRM_ERR(EINVAL);
+ goto err;
}
radeon_cp_discard_buffer( dev, buf );
DRM_DEBUG("RADEON_CMD_PACKET3\n");
if (radeon_emit_packet3( dev, filp_priv, &cmdbuf )) {
DRM_ERROR("radeon_emit_packet3 failed\n");
- return DRM_ERR(EINVAL);
+ goto err;
}
break;
DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
if (radeon_emit_packet3_cliprect( dev, filp_priv, &cmdbuf, orig_nbox )) {
DRM_ERROR("radeon_emit_packet3_clip failed\n");
- return DRM_ERR(EINVAL);
+ goto err;
}
break;
DRM_DEBUG("RADEON_CMD_SCALARS2\n");
if (radeon_emit_scalars2( dev_priv, header, &cmdbuf )) {
DRM_ERROR("radeon_emit_scalars2 failed\n");
- return DRM_ERR(EINVAL);
+ goto err;
}
break;
DRM_DEBUG("RADEON_CMD_WAIT\n");
if (radeon_emit_wait( dev, header.wait.flags )) {
DRM_ERROR("radeon_emit_wait failed\n");
- return DRM_ERR(EINVAL);
+ goto err;
}
break;
default:
DRM_ERROR("bad cmd_type %d at %p\n",
header.header.cmd_type,
cmdbuf.buf - sizeof(header));
- return DRM_ERR(EINVAL);
+ goto err;
}
}
+ if (orig_bufsz != 0)
+ kfree(kbuf);
DRM_DEBUG("DONE\n");
COMMIT_RING();
return 0;
+
+err:
+ if (orig_bufsz != 0)
+ kfree(kbuf);
+ return DRM_ERR(EINVAL);
}
struct completion event;
int ret;
- bio_get(bio);
-
rw |= (1 << BIO_RW_SYNC);
bio->bi_bdev = bdev;
data += 4;
dlen -= 4;
/* data[0] is code, data[1] is length */
- while (dlen >= 2 && dlen >= data[1]) {
+ while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
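	/* data[1] counts the code and length bytes themselves, so an option
	 * length below 2 is malformed and, left unchecked, would keep this
	 * walk from ever advancing. */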
switch (data[0]) {
case LCP_MRU:
val = (data[2] << 8) + data[3];
size_t len = count;
if (!(tun->flags & TUN_NO_PI)) {
- if ((len -= sizeof(pi)) > len)
+ if ((len -= sizeof(pi)) > count)
return -EINVAL;
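		/* len is a size_t: if count < sizeof(pi), the subtraction
		 * above wraps to a huge value, which the comparison against
		 * the original count catches. */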
if(memcpy_fromiovec((void *)&pi, iv, sizeof(pi)))
{"MAXTOR", "XT-4170S", "B5A", BLIST_NOLUN}, /* locks up */
{"MAXTOR", "XT-8760S", "B7B", BLIST_NOLUN}, /* locks up */
{"MEDIAVIS", "RENO CD-ROMX2A", "2.03", BLIST_NOLUN}, /* responds to all lun */
+ {"MICROTEK", "ScanMakerIII", "2.30", BLIST_NOLUN}, /* responds to all lun */
{"NEC", "CD-ROM DRIVE:841", "1.0", BLIST_NOLUN},/* locks up */
{"PHILIPS", "PCA80SC", "V4-2", BLIST_NOLUN}, /* responds to all lun */
{"RODIME", "RO3000S", "2.33", BLIST_NOLUN}, /* locks up */
* otherwise the new scheme is used. If that fails and "use_both_schemes"
* is set, then the driver will make another attempt, using the other scheme.
*/
-static int old_scheme_first = 0;
+static int old_scheme_first = 1;
module_param(old_scheme_first, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(old_scheme_first,
"start with the old device initialization scheme");
static int load_elf_library(struct file *file)
{
struct elf_phdr *elf_phdata;
+ struct elf_phdr *eppnt;
unsigned long elf_bss, bss, len;
int retval, error, i, j;
struct elfhdr elf_ex;
/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
error = -ENOMEM;
- elf_phdata = (struct elf_phdr *) kmalloc(j, GFP_KERNEL);
+ elf_phdata = kmalloc(j, GFP_KERNEL);
if (!elf_phdata)
goto out;
+ eppnt = elf_phdata;
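+	/* Scan with eppnt so elf_phdata keeps pointing at the start of the
+	 * allocation and the kfree() behind out_free_ph frees exactly what
+	 * kmalloc() returned. */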
error = -ENOEXEC;
- retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata, j);
+ retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
if (retval != j)
goto out_free_ph;
for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
- if ((elf_phdata + i)->p_type == PT_LOAD) j++;
+ if ((eppnt + i)->p_type == PT_LOAD)
+ j++;
if (j != 1)
goto out_free_ph;
- while (elf_phdata->p_type != PT_LOAD) elf_phdata++;
+ while (eppnt->p_type != PT_LOAD)
+ eppnt++;
/* Now use mmap to map the library into memory. */
down_write(¤t->mm->mmap_sem);
error = do_mmap(file,
- ELF_PAGESTART(elf_phdata->p_vaddr),
- (elf_phdata->p_filesz +
- ELF_PAGEOFFSET(elf_phdata->p_vaddr)),
+ ELF_PAGESTART(eppnt->p_vaddr),
+ (eppnt->p_filesz +
+ ELF_PAGEOFFSET(eppnt->p_vaddr)),
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
- (elf_phdata->p_offset -
- ELF_PAGEOFFSET(elf_phdata->p_vaddr)));
+ (eppnt->p_offset -
+ ELF_PAGEOFFSET(eppnt->p_vaddr)));
up_write(¤t->mm->mmap_sem);
- if (error != ELF_PAGESTART(elf_phdata->p_vaddr))
+ if (error != ELF_PAGESTART(eppnt->p_vaddr))
goto out_free_ph;
- elf_bss = elf_phdata->p_vaddr + elf_phdata->p_filesz;
+ elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
padzero(elf_bss);
- len = ELF_PAGESTART(elf_phdata->p_filesz + elf_phdata->p_vaddr + ELF_MIN_ALIGN - 1);
- bss = elf_phdata->p_memsz + elf_phdata->p_vaddr;
+ len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1);
+ bss = eppnt->p_memsz + eppnt->p_vaddr;
if (bss > len)
do_brk(len, bss - len);
error = 0;
inode->i_data.a_ops = &cramfs_aops;
} else {
inode->i_size = 0;
+ inode->i_blocks = 0;
init_special_inode(inode, inode->i_mode,
old_decode_dev(cramfs_inode->size));
}
return error;
}
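+/* Cap on maxevents: keeps maxevents * sizeof(struct epoll_event) within
+ * an int when the user-supplied area is sized and verified below. */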
+#define MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
/*
* Implement the event wait interface for the eventpoll file. It is the kernel
current, epfd, events, maxevents, timeout));
	/* The maximum number of events must be greater than zero */
- if (maxevents <= 0)
+ if (maxevents <= 0 || maxevents > MAX_EVENTS)
return -EINVAL;
/* Verify that the area passed by the user is writeable */
{
/* buf must be at least sizeof(tsk->comm) in size */
task_lock(tsk);
- memcpy(buf, tsk->comm, sizeof(tsk->comm));
+ strncpy(buf, tsk->comm, sizeof(tsk->comm));
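+	/* strncpy() stops at the NUL and zero-fills the remainder, so stale
+	 * bytes in comm beyond the name are not copied out to the caller. */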
task_unlock(tsk);
}
goto fail;
}
kaddr = kmap_atomic(page, KM_USER0);
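+	/* Zero the whole chunk up front so the slack after the "." and ".."
+	 * entries cannot leak stale memory contents to disk. */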
+ memset(kaddr, 0, chunk_size);
de = (struct ext2_dir_entry_2 *)kaddr;
de->name_len = 1;
de->rec_len = cpu_to_le16(EXT2_DIR_REC_LEN(1));
sbi->s_log_zone_size = isonum_723 (h_pri->logical_block_size);
sbi->s_max_size = isonum_733(h_pri->volume_space_size);
} else {
+ if (!pri)
+ goto out_freebh;
rootp = (struct iso_directory_record *) pri->root_directory_record;
sbi->s_nzones = isonum_733 (pri->volume_space_size);
sbi->s_log_zone_size = isonum_723 (pri->logical_block_size);
struct inode *inode;
struct isofs_iget5_callback_data data;
+ if (offset >= 1ul << sb->s_blocksize_bits)
+ return NULL;
+
data.block = block;
data.offset = offset;
if(LEN & 1) LEN++; \
CHR = ((unsigned char *) DE) + LEN; \
LEN = *((unsigned char *) DE) - LEN; \
+	if (LEN < 0) LEN = 0; \
if (ISOFS_SB(inode->i_sb)->s_rock_offset!=-1) \
{ \
LEN-=ISOFS_SB(inode->i_sb)->s_rock_offset; \
offset1 = 0; \
pbh = sb_bread(DEV->i_sb, block); \
if(pbh){ \
+	if (offset > pbh->b_size || offset + cont_size > pbh->b_size) { \
+ brelse(pbh); \
+ goto out; \
+ } \
memcpy(buffer + offset1, pbh->b_data + offset, cont_size - offset1); \
brelse(pbh); \
chr = (unsigned char *) buffer; \
struct rock_ridge * rr;
int sig;
- while (len > 1){ /* There may be one byte for padding somewhere */
+ while (len > 2){ /* There may be one byte for padding somewhere */
rr = (struct rock_ridge *) chr;
- if (rr->len == 0) goto out; /* Something got screwed up here */
+ if (rr->len < 3) goto out; /* Something got screwed up here */
sig = isonum_721(chr);
chr += rr->len;
len -= rr->len;
+ if (len < 0) goto out; /* corrupted isofs */
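+			/* A field is at least a two-byte signature plus a
+			 * length byte, hence len > 2 and rr->len < 3; len < 0
+			 * means the field ran past the system-use area. */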
switch(sig){
case SIG('R','R'):
break;
case SIG('N','M'):
if (truncate) break;
+ if (rr->len < 5) break;
/*
* If the flags are 2 or 4, this indicates '.' or '..'.
* We don't want to do anything with this, because it
struct rock_ridge * rr;
int rootflag;
- while (len > 1){ /* There may be one byte for padding somewhere */
+ while (len > 2){ /* There may be one byte for padding somewhere */
rr = (struct rock_ridge *) chr;
- if (rr->len == 0) goto out; /* Something got screwed up here */
+ if (rr->len < 3) goto out; /* Something got screwed up here */
sig = isonum_721(chr);
chr += rr->len;
len -= rr->len;
+ if (len < 0) goto out; /* corrupted isofs */
switch(sig){
#ifndef CONFIG_ZISOFS /* No flag for SF or ZF */
struct rock_ridge *rr;
if (!ISOFS_SB(inode->i_sb)->s_rock)
- panic ("Cannot have symlink with high sierra variant of iso filesystem\n");
+ goto error;
block = ei->i_iget5_block;
lock_kernel();
SETUP_ROCK_RIDGE(raw_inode, chr, len);
repeat:
- while (len > 1) { /* There may be one byte for padding somewhere */
+ while (len > 2) { /* There may be one byte for padding somewhere */
rr = (struct rock_ridge *) chr;
- if (rr->len == 0)
+ if (rr->len < 3)
goto out; /* Something got screwed up here */
sig = isonum_721(chr);
chr += rr->len;
len -= rr->len;
+ if (len < 0)
+ goto out; /* corrupted isofs */
switch (sig) {
case SIG('R', 'R'):
fail:
brelse(bh);
unlock_kernel();
+ error:
SetPageError(page);
kunmap(page);
unlock_page(page);
/* Fake conntrack entry for untracked connections */
extern struct ip_conntrack ip_conntrack_untracked;
-extern int ip_ct_no_defrag;
/* Returns new sk_buff, or NULL */
struct sk_buff *
-ip_ct_gather_frags(struct sk_buff *skb);
+ip_ct_gather_frags(struct sk_buff *skb, u_int32_t user);
/* Delete all conntracks which match. */
extern void
/*
* Functions provided by ip_fragment.o
*/
-
-struct sk_buff *ip_defrag(struct sk_buff *skb);
-extern void ipfrag_flush(void);
+
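+/*
+ * Reassembly contexts: a fragment queue is matched on (id, saddr, daddr,
+ * protocol, user), so fragments reassembled on behalf of one subsystem
+ * (local delivery, conntrack, IPVS, NAT, ...) can never complete a queue
+ * created for another. The last 32 values are handed out at run time via
+ * ip_defrag_user_id_alloc().
+ */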
+enum ip_defrag_users
+{
+ IP_DEFRAG_LOCAL_DELIVER,
+ IP_DEFRAG_CALL_RA_CHAIN,
+ IP_DEFRAG_CONNTRACK_IN,
+ IP_DEFRAG_CONNTRACK_OUT,
+ IP_DEFRAG_NAT_OUT,
+ IP_DEFRAG_VS_IN,
+ IP_DEFRAG_VS_OUT,
+ IP_DEFRAG_VS_FWD,
+ __IP_DEFRAG_DYNAMIC_FIRST,
+ __IP_DEFRAG_DYNAMIC_LAST = (__IP_DEFRAG_DYNAMIC_FIRST + 32) - 1,
+};
+
+struct sk_buff *ip_defrag(struct sk_buff *skb, u32 user);
+extern int ip_defrag_user_id_alloc(void);
+extern void ip_defrag_user_id_free(int user);
extern int ip_frag_nqueues;
extern atomic_t ip_frag_mem;
address += 4 + PAGE_SIZE - 1;
address &= PAGE_MASK;
grow = (address - vma->vm_end) >> PAGE_SHIFT;

	if (address < vma->vm_end) {
anon_vma_unlock(vma);
return 0;
}
+
/* Overcommit.. vx check first to avoid vm_unacct_memory() */
if (!vx_vmpages_avail(vma->vm_mm, grow) ||
security_vm_enough_memory(grow)) {
anon_vma_unlock(vma);
return 0;
}
+
/* Overcommit.. vx check first to avoid vm_unacct_memory() */
if (!vx_vmpages_avail(vma->vm_mm, grow) ||
security_vm_enough_memory(grow)) {
}
-int atm_get_addr(struct atm_dev *dev,struct sockaddr_atmsvc __user *buf,int size)
+int atm_get_addr(struct atm_dev *dev,struct sockaddr_atmsvc __user *buf,size_t size)
{
unsigned long flags;
struct atm_dev_addr *walk;
void atm_reset_addr(struct atm_dev *dev);
int atm_add_addr(struct atm_dev *dev,struct sockaddr_atmsvc *addr);
int atm_del_addr(struct atm_dev *dev,struct sockaddr_atmsvc *addr);
-int atm_get_addr(struct atm_dev *dev,struct sockaddr_atmsvc __user *buf,int size);
+int atm_get_addr(struct atm_dev *dev,struct sockaddr_atmsvc __user *buf,size_t size);
#endif
int bt_sock_register(int proto, struct net_proto_family *ops)
{
- if (proto >= BT_MAX_PROTO)
+ if (proto < 0 || proto >= BT_MAX_PROTO)
return -EINVAL;
if (bt_proto[proto])
int bt_sock_unregister(int proto)
{
- if (proto >= BT_MAX_PROTO)
+ if (proto < 0 || proto >= BT_MAX_PROTO)
return -EINVAL;
if (!bt_proto[proto])
{
int err = 0;
- if (proto >= BT_MAX_PROTO)
+ if (proto < 0 || proto >= BT_MAX_PROTO)
return -EINVAL;
#if defined(CONFIG_KMOD)
struct ipq {
struct ipq *next; /* linked list pointers */
struct list_head lru_list; /* lru list member */
+ u32 user;
u32 saddr;
u32 daddr;
u16 id;
/* Memory limiting on fragments. Evictor trashes the oldest
* fragment queue until we are back under the threshold.
*/
-static void __ip_evictor(int threshold)
+static void ip_evictor(void)
{
struct ipq *qp;
struct list_head *tmp;
int work;
- work = atomic_read(&ip_frag_mem) - threshold;
+ work = atomic_read(&ip_frag_mem) - sysctl_ipfrag_low_thresh;
if (work <= 0)
return;
}
}
-static inline void ip_evictor(void)
-{
- __ip_evictor(sysctl_ipfrag_low_thresh);
-}
-
/*
* Oops, a fragment queue timed out. Kill it and send an ICMP reply.
*/
if(qp->id == qp_in->id &&
qp->saddr == qp_in->saddr &&
qp->daddr == qp_in->daddr &&
- qp->protocol == qp_in->protocol) {
+ qp->protocol == qp_in->protocol &&
+ qp->user == qp_in->user) {
atomic_inc(&qp->refcnt);
write_unlock(&ipfrag_lock);
qp_in->last_in |= COMPLETE;
}
/* Add an entry to the 'ipq' queue for a newly received IP datagram. */
-static struct ipq *ip_frag_create(unsigned hash, struct iphdr *iph)
+static struct ipq *ip_frag_create(unsigned hash, struct iphdr *iph, u32 user)
{
struct ipq *qp;
qp->id = iph->id;
qp->saddr = iph->saddr;
qp->daddr = iph->daddr;
+ qp->user = user;
qp->len = 0;
qp->meat = 0;
qp->fragments = NULL;
/* Find the correct entry in the "incomplete datagrams" queue for
* this IP datagram, and create new one, if nothing is found.
*/
-static inline struct ipq *ip_find(struct iphdr *iph)
+static inline struct ipq *ip_find(struct iphdr *iph, u32 user)
{
__u16 id = iph->id;
__u32 saddr = iph->saddr;
if(qp->id == id &&
qp->saddr == saddr &&
qp->daddr == daddr &&
- qp->protocol == protocol) {
+ qp->protocol == protocol &&
+ qp->user == user) {
atomic_inc(&qp->refcnt);
read_unlock(&ipfrag_lock);
return qp;
}
read_unlock(&ipfrag_lock);
- return ip_frag_create(hash, iph);
+ return ip_frag_create(hash, iph, user);
}
/* Add new segment to existing queue. */
}
/* Process an incoming IP datagram fragment. */
-struct sk_buff *ip_defrag(struct sk_buff *skb)
+struct sk_buff *ip_defrag(struct sk_buff *skb, u32 user)
{
struct iphdr *iph = skb->nh.iph;
struct ipq *qp;
dev = skb->dev;
/* Lookup (or create) queue header */
- if ((qp = ip_find(iph)) != NULL) {
+ if ((qp = ip_find(iph, user)) != NULL) {
struct sk_buff *ret = NULL;
spin_lock(&qp->lock);
return NULL;
}
+static unsigned long ip_defrag_id_bitmap;
+
+int ip_defrag_user_id_alloc(void)
+{
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ if (!test_and_set_bit(i, &ip_defrag_id_bitmap))
+ return i + __IP_DEFRAG_DYNAMIC_FIRST;
+ }
+
+ return -ENFILE;
+}
+EXPORT_SYMBOL(ip_defrag_user_id_alloc);
+
+void ip_defrag_user_id_free(int user)
+{
+ user -= __IP_DEFRAG_DYNAMIC_FIRST;
+ if (user >= 0 && user < 32)
+ clear_bit(user, &ip_defrag_id_bitmap);
+}
+EXPORT_SYMBOL(ip_defrag_user_id_free);
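/*
 * Illustrative sketch, not part of the patch: a hypothetical module doing
 * its own reassembly would reserve one of the 32 dynamic defrag IDs at
 * load time, pass it to ip_defrag() for every fragment, and release it on
 * unload. Every name prefixed my_ is invented for this example.
 *
 * static int my_defrag_user;
 *
 * static int __init my_init(void)
 * {
 *	my_defrag_user = ip_defrag_user_id_alloc();
 *	if (my_defrag_user < 0)
 *		return my_defrag_user;	// -ENFILE: all 32 IDs taken
 *	return 0;
 * }
 *
 * static struct sk_buff *my_gather_frags(struct sk_buff *skb)
 * {
 *	// NULL until the final fragment arrives, as for every other user
 *	if (skb->nh.iph->frag_off & htons(IP_MF|IP_OFFSET))
 *		skb = ip_defrag(skb, my_defrag_user);
 *	return skb;
 * }
 *
 * static void __exit my_exit(void)
 * {
 *	ip_defrag_user_id_free(my_defrag_user);
 * }
 */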
+
void ipfrag_init(void)
{
ipfrag_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
add_timer(&ipfrag_secret_timer);
}
-void ipfrag_flush(void)
-{
- __ip_evictor(0);
-}
-
EXPORT_SYMBOL(ip_defrag);
-EXPORT_SYMBOL(ipfrag_flush);
(!sk->sk_bound_dev_if ||
sk->sk_bound_dev_if == skb->dev->ifindex)) {
if (skb->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
- skb = ip_defrag(skb);
+ skb = ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN);
if (skb == NULL) {
read_unlock(&ip_ra_lock);
return 1;
*/
if (skb->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
- skb = ip_defrag(skb);
+ skb = ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER);
if (!skb)
return 0;
}
to->priority = from->priority;
to->protocol = from->protocol;
to->security = from->security;
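+	/* The target skb may already hold a dst reference; release it before
+	 * the clone below overwrites it, or that reference is leaked. */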
+ dst_release(to->dst);
to->dst = dst_clone(from->dst);
to->dev = from->dev;
}
static inline struct sk_buff *
-ip_vs_gather_frags(struct sk_buff *skb)
+ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
{
- skb = ip_defrag(skb);
+ skb = ip_defrag(skb, user);
if (skb)
ip_send_check(skb->nh.iph);
return skb;
/* reassemble IP fragments */
if (skb->nh.iph->frag_off & __constant_htons(IP_MF|IP_OFFSET)) {
- skb = ip_vs_gather_frags(skb);
+ skb = ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT);
if (!skb)
return NF_STOLEN;
*pskb = skb;
/* reassemble IP fragments */
if (unlikely(iph->frag_off & __constant_htons(IP_MF|IP_OFFSET) &&
!pp->dont_defrag)) {
- skb = ip_vs_gather_frags(skb);
+ skb = ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT);
if (!skb)
return NF_STOLEN;
iph = skb->nh.iph;
* forward to the right destination host if relevant.
* Currently handles error types - unreachable, quench, ttl exceeded.
*/
-static int ip_vs_in_icmp(struct sk_buff **pskb, int *related)
+static int
+ip_vs_in_icmp(struct sk_buff **pskb, int *related, unsigned int hooknum)
{
struct sk_buff *skb = *pskb;
struct iphdr *iph;
/* reassemble IP fragments */
if (skb->nh.iph->frag_off & __constant_htons(IP_MF|IP_OFFSET)) {
- skb = ip_vs_gather_frags(skb);
+ skb = ip_vs_gather_frags(skb,
+ hooknum == NF_IP_LOCAL_IN ?
+ IP_DEFRAG_VS_IN : IP_DEFRAG_VS_FWD);
if (!skb)
return NF_STOLEN;
*pskb = skb;
iph = skb->nh.iph;
if (unlikely(iph->protocol == IPPROTO_ICMP)) {
- int related, verdict = ip_vs_in_icmp(pskb, &related);
+ int related, verdict = ip_vs_in_icmp(pskb, &related, hooknum);
if (related)
return verdict;
if ((*pskb)->nh.iph->protocol != IPPROTO_ICMP)
return NF_ACCEPT;
- return ip_vs_in_icmp(pskb, &r);
+ return ip_vs_in_icmp(pskb, &r, hooknum);
}
}
}
-int ip_ct_no_defrag;
-
/* Returns new sk_buff, or NULL */
struct sk_buff *
-ip_ct_gather_frags(struct sk_buff *skb)
+ip_ct_gather_frags(struct sk_buff *skb, u_int32_t user)
{
struct sock *sk = skb->sk;
#ifdef CONFIG_NETFILTER_DEBUG
unsigned int olddebug = skb->nf_debug;
#endif
- if (unlikely(ip_ct_no_defrag)) {
- kfree_skb(skb);
- return NULL;
- }
-
if (sk) {
sock_hold(sk);
skb_orphan(skb);
}
local_bh_disable();
- skb = ip_defrag(skb);
+ skb = ip_defrag(skb, user);
local_bh_enable();
if (!skb) {
/* Gather fragments. */
if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
- *pskb = ip_ct_gather_frags(*pskb);
+ *pskb = ip_ct_gather_frags(*pskb,
+ hooknum == NF_IP_PRE_ROUTING ?
+ IP_DEFRAG_CONNTRACK_IN :
+ IP_DEFRAG_CONNTRACK_OUT);
if (!*pskb)
return NF_STOLEN;
}
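	/* PRE_ROUTING and LOCAL_OUT now defragment in separate contexts, so
	 * fragments seen at one hook can never complete a queue begun at the
	 * other. */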
cleanup_defraglocalops:
nf_unregister_hook(&ip_conntrack_defrag_local_out_ops);
cleanup_defragops:
- /* Frag queues may hold fragments with skb->dst == NULL */
- ip_ct_no_defrag = 1;
- synchronize_net();
- local_bh_disable();
- ipfrag_flush();
- local_bh_enable();
nf_unregister_hook(&ip_conntrack_defrag_ops);
cleanup_proc_stat:
#ifdef CONFIG_PROC_FS
I'm starting to have nightmares about fragments. */
if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
- *pskb = ip_ct_gather_frags(*pskb);
+ *pskb = ip_ct_gather_frags(*pskb, IP_DEFRAG_NAT_OUT);
if (!*pskb)
return NF_STOLEN;
to->priority = from->priority;
to->protocol = from->protocol;
to->security = from->security;
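+	/* Same leak as in the copy helper above: drop any dst the target skb
+	 * already holds before overwriting it. */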
+ dst_release(to->dst);
to->dst = dst_clone(from->dst);
to->dev = from->dev;
%define kversion 2.6.%{sublevel}
%define rpmversion 2.6.%{sublevel}
%define rhbsys %([ -r /etc/beehive-root ] && echo || echo .`whoami`)
-%define release 1.14_FC2.2.planetlab%{?date:.%{date}}
+
+%define release 1.771_FC2.1.planetlab%{?date:.%{date}}
+
%define signmodules 0
+%define make_target bzImage
%define KVERREL %{PACKAGE_VERSION}-%{PACKAGE_RELEASE}
make -s nonint_oldconfig > /dev/null
make -s include/linux/version.h
- make -s %{?_smp_mflags} bzImage
+ make -s %{?_smp_mflags} %{make_target}
make -s %{?_smp_mflags} modules || exit 1
make buildcheck