(-)a/fs/ext4/ext4.h (+11 lines)
 Lines 781-786   struct ext4_inode_info {
 	/* on-disk additional length */
 	__u16 i_extra_isize;
 
+	atomic_t i_aiodio_unwritten; /* Nr. of inflight conversions pending */
+
 	spinlock_t i_block_reservation_lock;
 #ifdef CONFIG_QUOTA
 	/* quota space reservation, managed internally by quota code */
 Lines 1889-1894   static inline void set_bitmap_uptodate(struct buffer_head *bh)
 
 #define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
 
+/* For ioend & aio unwritten conversion wait queues */
+#define EXT4_WQ_HASH_SZ		37
+#define ext4_ioend_wq(v)   (&ext4__ioend_wq[((unsigned long)(v)) %\
+					    EXT4_WQ_HASH_SZ])
+#define ext4_aio_mutex(v)  (&ext4__aio_mutex[((unsigned long)(v)) %\
+					     EXT4_WQ_HASH_SZ])
+extern wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
+extern struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ];
+
 #endif	/* __KERNEL__ */
 
 #endif	/* _EXT4_H */
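
The two arrays declared above form a small hashed pool of wait queues and mutexes shared by all inodes, rather than one lock per inode. A minimal userspace sketch of that lock-striping idea, using pthreads and illustrative names (WQ_HASH_SZ, stripe_mutex are not ext4 symbols):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define WQ_HASH_SZ 37	/* same prime bucket count as EXT4_WQ_HASH_SZ */

static pthread_mutex_t stripe[WQ_HASH_SZ];

/* Map an arbitrary pointer to one of the striped mutexes,
 * the way ext4_aio_mutex() maps an inode pointer to a bucket. */
static pthread_mutex_t *stripe_mutex(const void *v)
{
	return &stripe[(uintptr_t)v % WQ_HASH_SZ];
}

int main(void)
{
	int obj_a, obj_b;
	int i;

	for (i = 0; i < WQ_HASH_SZ; i++)
		pthread_mutex_init(&stripe[i], NULL);

	/* Different objects may share a bucket; that is acceptable here
	 * because the lock is only taken on the rare unaligned-AIO path. */
	printf("obj_a -> bucket %lu\n",
	       (unsigned long)((uintptr_t)&obj_a % WQ_HASH_SZ));
	printf("obj_b -> bucket %lu\n",
	       (unsigned long)((uintptr_t)&obj_b % WQ_HASH_SZ));

	pthread_mutex_lock(stripe_mutex(&obj_a));
	pthread_mutex_unlock(stripe_mutex(&obj_a));
	return 0;
}

The fixed array keeps memory cost constant no matter how many inodes exist, at the price of occasional false sharing between unrelated inodes that hash to the same bucket.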
(-)a/fs/ext4/extents.c (-4 / +6 lines)
 Lines 3118-3126   ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
 		 * that this IO needs to convertion to written when IO is
 		 * completed
 		 */
-		if (io)
+		if (io && !(io->flag & DIO_AIO_UNWRITTEN)) {
 			io->flag = DIO_AIO_UNWRITTEN;
-		else
+			atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
+		} else
 			ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
 		goto out;
 	}
 Lines 3404-3412   int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 		 * that we need to perform convertion when IO is done.
 		 */
 		if (flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT) {
-			if (io)
+			if (io && !(io->flag & DIO_AIO_UNWRITTEN)) {
 				io->flag = DIO_AIO_UNWRITTEN;
-			else
+				atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
+			} else
 				ext4_set_inode_state(inode,
 						     EXT4_STATE_DIO_UNWRITTEN);
 		}
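
Both extents.c hunks follow the same accounting rule: the per-inode counter is bumped only the first time an io_end is flagged DIO_AIO_UNWRITTEN, so each async IO contributes exactly one pending conversion. A standalone sketch of that guard, with C11 atomics standing in for the kernel's atomic_t and made-up names (mark_unwritten, aiodio_unwritten):

#include <stdatomic.h>
#include <stdio.h>

#define DIO_AIO_UNWRITTEN 0x1	/* illustrative flag value */

struct io_end { unsigned int flag; };

static atomic_int aiodio_unwritten;	/* analogue of i_aiodio_unwritten */

static void mark_unwritten(struct io_end *io)
{
	/* Mirrors "if (io && !(io->flag & DIO_AIO_UNWRITTEN))":
	 * count the conversion only on the first flagging. */
	if (io && !(io->flag & DIO_AIO_UNWRITTEN)) {
		io->flag |= DIO_AIO_UNWRITTEN;
		atomic_fetch_add(&aiodio_unwritten, 1);
	}
}

int main(void)
{
	struct io_end io = { 0 };

	mark_unwritten(&io);
	mark_unwritten(&io);	/* second call is a no-op: still one pending */
	printf("pending conversions: %d\n", atomic_load(&aiodio_unwritten));
	return 0;
}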
(-)a/fs/ext4/file.c (-1 / +59 lines)
 Lines 54-64   static int ext4_release_file(struct inode *inode, struct file *filp)
 	return 0;
 }
 
+static void ext4_aiodio_wait(struct inode *inode)
+{
+	wait_queue_head_t *wq = ext4_ioend_wq(inode);
+
+	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_aiodio_unwritten) == 0));
+}
+
+/*
+ * This tests whether the IO in question is block-aligned or not.
+ * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
+ * are converted to written only after the IO is complete.  Until they are
+ * mapped, these blocks appear as holes, so dio_zero_block() will assume that
+ * it needs to zero out portions of the start and/or end block.  If 2 AIO
+ * threads are at work on the same unwritten block, they must be synchronized
+ * or one thread will zero the other's data, causing corruption.
+ */
+static int
+ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
+		   unsigned long nr_segs, loff_t pos)
+{
+	struct super_block *sb = inode->i_sb;
+	int blockmask = sb->s_blocksize - 1;
+	size_t count = iov_length(iov, nr_segs);
+	loff_t final_size = pos + count;
+
+	if (pos >= inode->i_size)
+		return 0;
+
+	if ((pos & blockmask) || (final_size & blockmask))
+		return 1;
+
+	return 0;
+}
+
 static ssize_t
 ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
 		unsigned long nr_segs, loff_t pos)
 {
 	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
+	int unaligned_aio = 0;
+	int ret;
 
 	/*
 	 * If we have encountered a bitmap-format file, the size limit
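
ext4_unaligned_aio() above flags any write whose start or end offset falls inside a filesystem block. A tiny standalone check of the same mask arithmetic, with a 4096-byte block size and function name chosen purely for illustration:

#include <stdio.h>

static int is_unaligned(long long pos, long long count, long blocksize)
{
	long blockmask = blocksize - 1;		/* blocksize must be a power of two */
	long long final_size = pos + count;

	/* unaligned if either edge of the write lands mid-block */
	return (pos & blockmask) || (final_size & blockmask);
}

int main(void)
{
	/* 4096-byte write at offset 4096: both edges block-aligned */
	printf("%d\n", is_unaligned(4096, 4096, 4096));	/* prints 0 */
	/* 512-byte write at offset 512: both edges mid-block */
	printf("%d\n", is_unaligned(512, 512, 4096));	/* prints 1 */
	return 0;
}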
 Lines 76-84   ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
 			nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
 					      sbi->s_bitmap_maxbytes - pos);
 		}
+	} else if (unlikely((iocb->ki_filp->f_flags & O_DIRECT) &&
+		   !is_sync_kiocb(iocb))) {
+		unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos);
 	}
 
-	return generic_file_aio_write(iocb, iov, nr_segs, pos);
+	/* Unaligned direct AIO must be serialized; see comment above */
+	if (unaligned_aio) {
+		static unsigned long unaligned_warn_time;
+
+		/* Warn about this once per day */
+		if (printk_timed_ratelimit(&unaligned_warn_time, 60*60*24*HZ))
+			ext4_msg(inode->i_sb, KERN_WARNING,
+				 "Unaligned AIO/DIO on inode %ld by %s; "
+				 "performance will be poor.",
+				 inode->i_ino, current->comm);
+		mutex_lock(ext4_aio_mutex(inode));
+		ext4_aiodio_wait(inode);
+	}
+
+	ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
+
+	if (unaligned_aio)
+		mutex_unlock(ext4_aio_mutex(inode));
+
+	return ret;
 }
 
 static const struct vm_operations_struct ext4_file_vm_ops = {
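
The rewritten ext4_file_write() takes the hashed mutex and drains in-flight conversions only on the unaligned path, then releases it after generic_file_aio_write() returns. A userspace sketch of that control flow, with pthreads standing in for the kernel mutex and wait queue, and do_write()/pending_conversions as made-up placeholders:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t aio_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t wq_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;
static int pending_conversions;	/* analogue of i_aiodio_unwritten */

/* Analogue of ext4_aiodio_wait(): block until no conversions are pending. */
static void aiodio_wait(void)
{
	pthread_mutex_lock(&wq_lock);
	while (pending_conversions != 0)
		pthread_cond_wait(&wq, &wq_lock);
	pthread_mutex_unlock(&wq_lock);
}

static long do_write(int unaligned)
{
	long ret;

	if (unaligned) {		/* unaligned AIO: serialize and drain */
		pthread_mutex_lock(&aio_mutex);
		aiodio_wait();
	}

	ret = 0;			/* stands in for generic_file_aio_write() */

	if (unaligned)
		pthread_mutex_unlock(&aio_mutex);
	return ret;
}

int main(void)
{
	printf("write returned %ld\n", do_write(1));
	return 0;
}

Aligned writes skip both the mutex and the wait entirely, so the common fast path is unchanged by the patch.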
(-)a/fs/ext4/super.c (-1 / +12 lines)
 Lines 713-718   static struct inode *ext4_alloc_inode(struct super_block *sb)
 	ei->cur_aio_dio = NULL;
 	ei->i_sync_tid = 0;
 	ei->i_datasync_tid = 0;
+	atomic_set(&ei->i_aiodio_unwritten, 0);
 
 	return &ei->vfs_inode;
 }
 Lines 4002-4012   static struct file_system_type ext4_fs_type = {
 	.fs_flags	= FS_REQUIRES_DEV,
 };
 
+/* Shared across all ext4 file systems */
+wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
+struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ];
+
 static int __init init_ext4_fs(void)
 {
-	int err;
+	int i, err;
 
 	ext4_check_flag_values();
+
+	for (i = 0; i < EXT4_WQ_HASH_SZ; i++) {
+		mutex_init(&ext4__aio_mutex[i]);
+		init_waitqueue_head(&ext4__ioend_wq[i]);
+	}
+
 	err = init_ext4_system_zone();
 	if (err)
 		return err;
