2 * RelayFS buffer management and resizing code.
4 * Copyright (C) 2002, 2003 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
5 * Copyright (C) 1999, 2000, 2001, 2002 - Karim Yaghmour (karim@opersys.com)
7 * This file is released under the GPL.
10 #include <linux/module.h>
11 #include <linux/vmalloc.h>
13 #include <asm/relay.h>
17 * alloc_page_array - alloc array to hold pages, but not pages
18 * @size: the total size of the memory represented by the page array
19 * @page_count: the number of pages the array can hold
20 * @err: 0 on success, negative otherwise
22 * Returns a pointer to the page array if successful, NULL otherwise.
25 alloc_page_array(int size, int *page_count, int *err)
28 struct page **page_array;
/* Round the request up to whole pages; one array slot per page. */
33 size = PAGE_ALIGN(size);
34 n_pages = size >> PAGE_SHIFT;
35 page_array_size = n_pages * sizeof(struct page *);
36 page_array = kmalloc(page_array_size, GFP_KERNEL);
37 if (page_array == NULL) {
/* NOTE(review): failure-branch body elided in this excerpt -- presumably
 * sets *err negative and returns NULL; confirm against the full source. */
41 *page_count = n_pages;
/* Zero all slots so callers can tell populated entries from empty ones. */
42 memset(page_array, 0, page_array_size);
48 * free_page_array - free array to hold pages, but not pages
49 * @page_array: pointer to the page array
/* Releases only the pointer array itself; the pages it referenced must
 * already have been freed (see depopulate_page_array). Body elided here. */
52 free_page_array(struct page **page_array)
58 * depopulate_page_array - free and unreserve all pages in the array
59 * @page_array: pointer to the page array
60 * @page_count: number of pages to free
63 depopulate_page_array(struct page **page_array, int page_count)
67 for (i = 0; i < page_count; i++) {
/* Undo the SetPageReserved applied in populate_page_array, then free. */
68 ClearPageReserved(page_array[i]);
69 __free_page(page_array[i]);
74 * populate_page_array - allocate and reserve pages
75 * @page_array: pointer to the page array
76 * @page_count: number of pages to allocate
78 * Returns 0 if successful, negative otherwise.
81 populate_page_array(struct page **page_array, int page_count)
85 for (i = 0; i < page_count; i++) {
86 page_array[i] = alloc_page(GFP_KERNEL);
87 if (unlikely(!page_array[i])) {
/* Partial failure: release the i pages allocated so far before bailing. */
88 depopulate_page_array(page_array, i);
/* Reserve each page so it is safe to map into the vmap'd channel buffer. */
91 SetPageReserved(page_array[i]);
97 * alloc_rchan_buf - allocate the initial channel buffer
98 * @size: total size of the buffer
99 * @page_array: receives a pointer to the buffer's page array
100 * @page_count: receives the number of pages allocated
102 * Returns a pointer to the resulting buffer, NULL if unsuccessful
105 alloc_rchan_buf(unsigned long size, struct page ***page_array, int *page_count)
/* Three-step setup: slot array, then the pages, then a virtually
 * contiguous mapping of those pages.  Each later failure unwinds the
 * earlier steps (error-check lines are elided in this excerpt). */
110 *page_array = alloc_page_array(size, page_count, &err);
114 err = populate_page_array(*page_array, *page_count);
116 free_page_array(*page_array);
121 mem = vmap(*page_array, *page_count, GFP_KERNEL, PAGE_KERNEL);
123 depopulate_page_array(*page_array, *page_count);
124 free_page_array(*page_array);
/* Start the channel with a zeroed buffer. */
128 memset(mem, 0, size);
134 * free_rchan_buf - free a channel buffer
135 * @buf: pointer to the buffer to free
136 * @page_array: pointer to the buffer's page array
137 * @page_count: number of pages in page array
140 free_rchan_buf(void *buf, struct page **page_array, int page_count)
/* Reverse of alloc_rchan_buf: pages first, then the slot array.
 * (The vunmap of @buf is presumably in lines elided from this excerpt.) */
143 depopulate_page_array(page_array, page_count);
144 free_page_array(page_array);
148 * expand_check - check whether the channel needs expanding
149 * @rchan: the channel
151 * If the channel needs expanding, the needs_resize callback is
152 * called with RELAY_RESIZE_EXPAND.
154 * Returns the suggested number of sub-buffers for the new
158 expand_check(struct rchan *rchan)
162 u32 threshold = rchan->n_bufs * RESIZE_THRESHOLD;
/* resize_min == 0 means auto-resize is disabled for this channel. */
167 if (rchan->resize_min == 0)
/* Don't stack resizes: bail while a resize/replace is already pending. */
170 if (rchan->resizing || rchan->replace_buffer)
173 active_bufs = rchan->bufs_produced - rchan->bufs_consumed + 1;
/* NOTE(review): equality (==) rather than >= against the threshold --
 * looks intentional (fires once as the level crosses), but confirm. */
175 if (rchan->resize_max && active_bufs == threshold) {
/* Suggested growth is always a doubling of the sub-buffer count. */
176 new_n_bufs = rchan->n_bufs * 2;
/* Only suggest the expand if it stays within the configured resize_max. */
179 if (new_n_bufs && (new_n_bufs * rchan->buf_size <= rchan->resize_max))
180 rchan->callbacks->needs_resize(rchan->id,
187 * can_shrink - check whether the channel can shrink
188 * @rchan: the channel
189 * @cur_idx: the current channel index
191 * Returns the suggested number of sub-buffers for the new
192 * buffer, 0 if the buffer is not shrinkable.
195 can_shrink(struct rchan *rchan, u32 cur_idx)
197 u32 active_bufs = rchan->bufs_produced - rchan->bufs_consumed + 1;
199 u32 cur_bufno_bytes = cur_idx % rchan->buf_size;
/* Not shrinkable if auto-resize is off or we're already at/below the floor. */
201 if (rchan->resize_min == 0 ||
202 rchan->resize_min >= rchan->n_bufs * rchan->buf_size)
/* Writer must be exactly at the consumed position within the current
 * sub-buffer, i.e. readers are fully caught up, or we can't shrink safely. */
208 if (cur_bufno_bytes != rchan->bytes_consumed)
/* Suggest shrinking all the way down to the configured minimum. */
211 new_n_bufs = rchan->resize_min / rchan->buf_size;
217 * shrink_check: - timer function checking whether the channel can shrink
220 * Every SHRINK_TIMER_SECS, check whether the channel is shrinkable.
221 * If so, we attempt to atomically reset the channel to the beginning.
222 * The needs_resize callback is then called with RELAY_RESIZE_SHRINK.
223 * If the reset fails, it means we really shouldn't be shrinking now
224 * and need to wait until the next time around.
227 shrink_check(unsigned long data)
229 struct rchan *rchan = (struct rchan *)data;
230 u32 shrink_to_nbufs, cur_idx;
/* Re-arm ourselves first so the periodic check survives any early exit. */
232 del_timer(&rchan->shrink_timer);
233 rchan->shrink_timer.expires = jiffies + SHRINK_TIMER_SECS * HZ;
234 add_timer(&rchan->shrink_timer);
/* Skip this round while another resize/replace is in flight. */
239 if (rchan->resizing || rchan->replace_buffer)
242 if (using_lockless(rchan))
243 cur_idx = idx(rchan);
245 cur_idx = relay_get_offset(rchan, NULL);
/* Only shrink if allowed AND the atomic reset to the start succeeds. */
247 shrink_to_nbufs = can_shrink(rchan, cur_idx);
248 if (shrink_to_nbufs != 0 && reset_index(rchan, cur_idx) == 0) {
249 update_readers_consumed(rchan, rchan->bufs_consumed, 0);
250 rchan->callbacks->needs_resize(rchan->id,
258 * init_shrink_timer: - Start timer used to check shrinkability.
259 * @rchan: the channel
262 init_shrink_timer(struct rchan *rchan)
/* A zero resize_min disables auto-shrink, so no timer is started. */
264 if (rchan->resize_min) {
265 init_timer(&rchan->shrink_timer);
266 rchan->shrink_timer.function = shrink_check;
/* Old-style timer API: the channel pointer travels via ->data. */
267 rchan->shrink_timer.data = (unsigned long)rchan;
268 rchan->shrink_timer.expires = jiffies + SHRINK_TIMER_SECS * HZ;
269 add_timer(&rchan->shrink_timer);
275 * alloc_new_pages - allocate new pages for expanding buffer
276 * @rchan: the channel
278 * Returns 0 on success, negative otherwise.
281 alloc_new_pages(struct rchan *rchan)
283 int new_pages_size, err;
/* A leftover expand array means a previous resize never completed. */
285 if (unlikely(rchan->expand_page_array)) BUG();
/* Only the delta between new and old sizes needs fresh pages. */
287 new_pages_size = rchan->resize_alloc_size - rchan->alloc_size;
288 rchan->expand_page_array = alloc_page_array(new_pages_size,
289 &rchan->expand_page_count, &err);
290 if (rchan->expand_page_array == NULL) {
291 rchan->resize_err = -ENOMEM;
295 err = populate_page_array(rchan->expand_page_array,
296 rchan->expand_page_count);
/* Population failed: drop the slot array and record the OOM. */
298 rchan->resize_err = -ENOMEM;
299 free_page_array(rchan->expand_page_array);
300 rchan->expand_page_array = NULL;
307 * clear_resize_offset - helper function for buffer resizing
308 * @rchan: the channel
310 * Clear the saved offset change.
313 clear_resize_offset(struct rchan *rchan)
315 rchan->resize_offset.ge = 0UL;
316 rchan->resize_offset.le = 0UL;
317 rchan->resize_offset.delta = 0;
321 * save_resize_offset - helper function for buffer resizing
322 * @rchan: the channel
323 * @ge: affected region ge this
324 * @le: affected region le this
325 * @delta: apply this delta
327 * Save a resize offset.
/* Records a [ge, le] range of reader file positions that must be shifted
 * by @delta once the resized buffer is swapped in (see update_file_offset). */
330 save_resize_offset(struct rchan *rchan, u32 ge, u32 le, int delta)
332 rchan->resize_offset.ge = ge;
333 rchan->resize_offset.le = le;
334 rchan->resize_offset.delta = delta;
338 * update_file_offset - apply offset change to reader
339 * @reader: the channel reader
340 * @change_idx: the offset index into the offsets array
342 * Returns non-zero if the offset was applied.
344 * Apply the offset delta saved in change_idx to the reader's
345 * current read position.
348 update_file_offset(struct rchan_reader *reader)
351 struct rchan *rchan = reader->rchan;
353 int delta = reader->rchan->resize_offset.delta;
/* VFS readers track position in the struct file; others keep it inline. */
355 if (reader->vfs_reader)
356 f_pos = (u32)reader->pos.file->f_pos;
358 f_pos = reader->pos.f_pos;
/* A reader sitting exactly at the write position needs no adjustment. */
360 if (f_pos == relay_get_offset(rchan, NULL))
/* NOTE(review): `ge - 1` widens the range by one byte below ge -- looks
 * like a deliberate boundary tweak, but verify against the full source. */
363 if ((f_pos >= rchan->resize_offset.ge - 1) &&
364 (f_pos <= rchan->resize_offset.le)) {
365 if (reader->vfs_reader)
366 reader->pos.file->f_pos += delta;
368 reader->pos.f_pos += delta;
376 * update_file_offsets - apply offset change to readers
377 * @rchan: the channel
379 * Apply the saved offset deltas to all files open on the channel.
382 update_file_offsets(struct rchan *rchan)
385 struct rchan_reader *reader;
/* Readers list is only traversed here, so the read side of the lock suffices. */
387 read_lock(&rchan->open_readers_lock);
388 list_for_each(p, &rchan->open_readers) {
389 reader = list_entry(p, struct rchan_reader, list);
/* Flag readers whose position moved so they can be notified later. */
390 if (update_file_offset(reader))
391 reader->offset_changed = 1;
393 read_unlock(&rchan->open_readers_lock);
397 * setup_expand_buf - setup expand buffer for replacement
398 * @rchan: the channel
399 * @newsize: the size of the new buffer
400 * @oldsize: the size of the old buffer
401 * @old_n_bufs: the number of sub-buffers in the old buffer
403 * Inserts new pages into the old buffer to create a larger
404 * new channel buffer, splitting them at old_cur_idx, the bottom
405 * half of the old buffer going to the bottom of the new, likewise
409 setup_expand_buf(struct rchan *rchan, int newsize, int oldsize, u32 old_n_bufs)
412 int cur_bufno, delta, i, j;
415 u32 free_bufs, free_pages;
416 u32 free_pages_in_cur_buf;
417 u32 free_bufs_to_end;
418 u32 cur_pages = rchan->alloc_size >> PAGE_SHIFT;
419 u32 pages_per_buf = cur_pages / rchan->n_bufs;
420 u32 bufs_ready = rchan->bufs_produced - rchan->bufs_consumed;
/* All three page arrays must exist before we can splice them together. */
422 if (!rchan->resize_page_array || !rchan->expand_page_array ||
423 !rchan->buf_page_array)
/* Clamp: never treat more than n_bufs sub-buffers as unconsumed. */
426 if (bufs_ready >= rchan->n_bufs) {
427 bufs_ready = rchan->n_bufs;
430 free_bufs = rchan->n_bufs - bufs_ready - 1;
432 cur_idx = relay_get_offset(rchan, NULL);
433 cur_pageno = cur_idx / PAGE_SIZE;
434 cur_bufno = cur_idx / rchan->buf_size;
/* Count free room after the write position: rest of the current
 * sub-buffer plus wholly free sub-buffers. */
436 free_pages_in_cur_buf = (pages_per_buf - 1) - (cur_pageno % pages_per_buf);
437 free_pages = free_bufs * pages_per_buf + free_pages_in_cur_buf;
438 free_bufs_to_end = (rchan->n_bufs - 1) - cur_bufno;
/* Don't let the free region wrap past the end of the old buffer. */
439 if (free_bufs >= free_bufs_to_end) {
440 free_pages = free_bufs_to_end * pages_per_buf + free_pages_in_cur_buf;
441 free_bufs = free_bufs_to_end;
/* Build the new page array: old pages up to and including the free
 * region, then all freshly-allocated pages, then the remaining old pages. */
444 for (i = 0, j = 0; i <= cur_pageno + free_pages; i++, j++)
445 rchan->resize_page_array[j] = rchan->buf_page_array[i];
446 for (i = 0; i < rchan->expand_page_count; i++, j++)
447 rchan->resize_page_array[j] = rchan->expand_page_array[i];
448 for (i = cur_pageno + free_pages + 1; i < rchan->buf_page_count; i++, j++)
449 rchan->resize_page_array[j] = rchan->buf_page_array[i];
/* Readers positioned after the insertion point must be shifted by delta. */
451 delta = newsize - oldsize;
452 ge = (cur_pageno + 1 + free_pages) * PAGE_SIZE;
454 save_resize_offset(rchan, ge, le, delta);
/* The buf id at which the expanded layout takes effect. */
456 rchan->expand_buf_id = rchan->buf_id + 1 + free_bufs;
460 * setup_shrink_buf - setup shrink buffer for replacement
461 * @rchan: the channel
463 * Removes pages from the old buffer to create a smaller
464 * new channel buffer.
467 setup_shrink_buf(struct rchan *rchan)
472 if (!rchan->resize_page_array || !rchan->shrink_page_array ||
473 !rchan->buf_page_array)
/* Keep only the first resize_alloc_size bytes' worth of pages; the tail
 * pages were captured in shrink_page_array for later freeing. */
476 copy_end_page = rchan->resize_alloc_size / PAGE_SIZE;
478 for (i = 0; i < copy_end_page; i++)
479 rchan->resize_page_array[i] = rchan->buf_page_array[i];
483 * cleanup_failed_alloc - relaybuf_alloc helper
/* Tear down whatever a failed relaybuf_alloc managed to build.  Expand
 * pages were freshly allocated, so they are depopulated; shrink pages
 * still belong to the live buffer, so only their slot array is freed. */
486 cleanup_failed_alloc(struct rchan *rchan)
488 if (rchan->expand_page_array) {
489 depopulate_page_array(rchan->expand_page_array,
490 rchan->expand_page_count);
491 free_page_array(rchan->expand_page_array);
492 rchan->expand_page_array = NULL;
493 rchan->expand_page_count = 0;
494 } else if (rchan->shrink_page_array) {
495 free_page_array(rchan->shrink_page_array);
496 rchan->shrink_page_array = NULL;
497 rchan->shrink_page_count = 0;
/* The combined resize array never owns pages -- free the array only. */
500 if (rchan->resize_page_array) {
501 free_page_array(rchan->resize_page_array);
502 rchan->resize_page_array = NULL;
503 rchan->resize_page_count = 0;
508 * relaybuf_alloc - allocate a new resized channel buffer
509 * @private: pointer to the channel struct
511 * Internal - manages the allocation and remapping of new channel
/* May run in work-queue context (see __relay_realloc_buffer), hence the
 * void* parameter.  On success it sets replace_buffer and fires the
 * RELAY_RESIZE_REPLACE callback; the actual swap happens later in
 * relay_replace_buffer(). */
515 relaybuf_alloc(void *private)
517 struct rchan *rchan = (struct rchan *)private;
521 int free_start_page, free_end_page;
522 u32 newsize, oldsize;
/* Expanding: allocate just the extra pages. */
524 if (rchan->resize_alloc_size > rchan->alloc_size) {
525 err = alloc_new_pages(rchan);
526 if (err) goto cleanup;
/* Shrinking: capture the to-be-discarded tail pages in shrink_page_array
 * (array only -- the pages still belong to the live buffer). */
528 free_size = rchan->alloc_size - rchan->resize_alloc_size;
529 BUG_ON(free_size <= 0);
530 rchan->shrink_page_array = alloc_page_array(free_size,
531 &rchan->shrink_page_count, &err);
532 if (rchan->shrink_page_array == NULL)
534 free_start_page = rchan->resize_alloc_size / PAGE_SIZE;
535 free_end_page = rchan->alloc_size / PAGE_SIZE;
536 for (i = 0, j = free_start_page; j < free_end_page; i++, j++)
537 rchan->shrink_page_array[i] = rchan->buf_page_array[j];
/* The combined array describing the complete new buffer. */
540 rchan->resize_page_array = alloc_page_array(rchan->resize_alloc_size,
541 &rchan->resize_page_count, &err);
542 if (rchan->resize_page_array == NULL)
545 old_cur_idx = relay_get_offset(rchan, NULL);
546 clear_resize_offset(rchan);
547 newsize = rchan->resize_alloc_size;
548 oldsize = rchan->alloc_size;
549 if (newsize > oldsize)
550 setup_expand_buf(rchan, newsize, oldsize, rchan->n_bufs);
552 setup_shrink_buf(rchan);
/* Map the new page set into a contiguous kernel address range. */
554 rchan->resize_buf = vmap(rchan->resize_page_array, rchan->resize_page_count, GFP_KERNEL, PAGE_KERNEL);
556 if (rchan->resize_buf == NULL)
/* New buffer is ready; ask the client to trigger the replacement. */
559 rchan->replace_buffer = 1;
562 rchan->callbacks->needs_resize(rchan->id, RELAY_RESIZE_REPLACE, 0, 0);
/* Common failure path: undo partial allocations and record the OOM. */
566 cleanup_failed_alloc(rchan);
567 rchan->resize_err = -ENOMEM;
572 * relaybuf_free - free a resized channel buffer
573 * @private: pointer to the channel struct
575 * Internal - manages the de-allocation and unmapping of old channel
579 relaybuf_free(void *private)
581 struct free_rchan_buf *free_buf = (struct free_rchan_buf *)private;
584 if (free_buf->unmap_buf)
585 vunmap(free_buf->unmap_buf);
/* Up to three deferred page arrays (see add_free_page_array).  A zero
 * count means free only the slot array -- the pages live on elsewhere. */
587 for (i = 0; i < 3; i++) {
588 if (!free_buf->page_array[i].array)
590 if (free_buf->page_array[i].count)
591 depopulate_page_array(free_buf->page_array[i].array,
592 free_buf->page_array[i].count);
593 free_page_array(free_buf->page_array[i].array);
600 * calc_order - determine the power-of-2 order of a resize
601 * @high: the larger size
602 * @low: the smaller size
/* NOTE(review): body largely elided in this excerpt; only the sanity
 * guard on the arguments is visible. */
607 calc_order(u32 high, u32 low)
611 if (!high || !low || high <= low)
623 * check_size - check the sanity of the requested channel size
624 * @rchan: the channel
625 * @nbufs: the new number of sub-buffers
628 * Returns the non-zero total buffer size if ok, otherwise 0 and
629 * sets errcode if not.
632 check_size(struct rchan *rchan, u32 nbufs, int *err)
634 u32 new_channel_size = 0;
/* Expanding: growth must be a power-of-2 multiple and within resize_max. */
638 if (nbufs > rchan->n_bufs) {
639 rchan->resize_order = calc_order(nbufs, rchan->n_bufs);
640 if (!rchan->resize_order) {
645 new_channel_size = rchan->buf_size * nbufs;
646 if (new_channel_size > rchan->resize_max) {
/* Shrinking: need at least 2 sub-buffers to shrink from; shrink must be
 * a power-of-2 divisor (negative order) and stay above resize_min. */
650 } else if (nbufs < rchan->n_bufs) {
651 if (rchan->n_bufs < 2) {
655 rchan->resize_order = -calc_order(rchan->n_bufs, nbufs);
656 if (!rchan->resize_order) {
661 new_channel_size = rchan->buf_size * nbufs;
662 if (new_channel_size < rchan->resize_min) {
669 return new_channel_size;
673 * __relay_realloc_buffer - allocate a new channel buffer
674 * @rchan: the channel
675 * @new_nbufs: the new number of sub-buffers
676 * @async: do the allocation using a work queue
678 * Internal - see relay_realloc_buffer() for details.
681 __relay_realloc_buffer(struct rchan *rchan, u32 new_nbufs, int async)
683 u32 new_channel_size;
/* Same size requested -- nothing to do. */
686 if (new_nbufs == rchan->n_bufs)
/* Non-blocking: if another resize holds the semaphore, report busy. */
689 if (down_trylock(&rchan->resize_sem))
/* Channels using a caller-supplied init buffer can't be resized. */
692 if (rchan->init_buf) {
/* Refuse to start a resize while a replace or resize is pending. */
697 if (rchan->replace_buffer) {
702 if (rchan->resizing) {
/* Give up after repeated failed attempts. */
708 if (rchan->resize_failures > MAX_RESIZE_FAILURES) {
713 new_channel_size = check_size(rchan, new_nbufs, &err);
717 rchan->resize_n_bufs = new_nbufs;
718 rchan->resize_buf_size = rchan->buf_size;
719 rchan->resize_alloc_size = FIX_SIZE(new_channel_size);
/* Either defer the allocation to a work queue or do it inline. */
722 INIT_WORK(&rchan->work, relaybuf_alloc, rchan);
723 schedule_delayed_work(&rchan->work, 1);
725 relaybuf_alloc((void *)rchan);
727 up(&rchan->resize_sem);
733 * relay_realloc_buffer - allocate a new channel buffer
734 * @rchan_id: the channel id
735 * @bufsize: the new sub-buffer size
736 * @nbufs: the new number of sub-buffers
738 * Allocates a new channel buffer using the specified sub-buffer size
739 * and count. If async is non-zero, the allocation is done in the
740 * background using a work queue. When the allocation has completed,
741 * the needs_resize() callback is called with a resize_type of
742 * RELAY_RESIZE_REPLACE. This function doesn't replace the old buffer
743 * with the new - see relay_replace_buffer(). See
744 * Documentation/filesystems/relayfs.txt for more details.
746 * Returns 0 on success, or errcode if the channel is busy or if
747 * the allocation couldn't happen for some reason.
750 relay_realloc_buffer(int rchan_id, u32 new_nbufs, int async)
/* Resolve the id to a channel ref (the error check is elided here),
 * then delegate to the internal worker. */
756 rchan = rchan_get(rchan_id);
760 err = __relay_realloc_buffer(rchan, new_nbufs, async);
768 * expand_cancel_check - check whether the current expand needs canceling
769 * @rchan: the channel
771 * Returns 1 if the expand should be canceled, 0 otherwise.
774 expand_cancel_check(struct rchan *rchan)
/* If the writer already passed the buf id the expand was staged for,
 * the staged layout is stale and must be abandoned. */
776 if (rchan->buf_id >= rchan->expand_buf_id)
783 * shrink_cancel_check - check whether the current shrink needs canceling
784 * @rchan: the channel
786 * Returns 1 if the shrink should be canceled, 0 otherwise.
789 shrink_cancel_check(struct rchan *rchan, u32 newsize)
791 u32 active_bufs = rchan->bufs_produced - rchan->bufs_consumed + 1;
792 u32 cur_idx = relay_get_offset(rchan, NULL);
/* Writer has moved beyond the would-be end of the smaller buffer. */
794 if (cur_idx >= newsize)
804 * switch_rchan_buf - do_replace_buffer helper
/* Swaps the staged resize buffer in as the live channel buffer and
 * fixes up per-sub-buffer bookkeeping and write-position pointers. */
807 switch_rchan_buf(struct rchan *rchan,
813 u32 newbufs, cur_bufno;
816 cur_bufno = cur_idx / rchan->buf_size;
818 rchan->buf = rchan->resize_buf;
819 rchan->alloc_size = rchan->resize_alloc_size;
820 rchan->n_bufs = rchan->resize_n_bufs;
/* Expansion: sub-buffers at/after the insertion point shift up by the
 * number of inserted sub-buffers; the inserted ones start empty. */
822 if (newsize > oldsize) {
823 u32 ge = rchan->resize_offset.ge;
824 u32 moved_buf = ge / rchan->buf_size;
826 newbufs = (newsize - oldsize) / rchan->buf_size;
827 for (i = moved_buf; i < old_nbufs; i++) {
828 if (using_lockless(rchan))
829 atomic_set(&fill_count(rchan, i + newbufs),
830 atomic_read(&fill_count(rchan, i)));
831 rchan->unused_bytes[i + newbufs] = rchan->unused_bytes[i];
833 for (i = moved_buf; i < moved_buf + newbufs; i++) {
834 if (using_lockless(rchan))
835 atomic_set(&fill_count(rchan, i),
836 (int)RELAY_BUF_SIZE(offset_bits(rchan)));
837 rchan->unused_bytes[i] = 0;
841 rchan->buf_idx = cur_bufno;
/* Locking scheme: recompute the raw write pointers into the new buffer. */
843 if (!using_lockless(rchan)) {
844 cur_write_pos(rchan) = rchan->buf + cur_idx;
845 write_buf(rchan) = rchan->buf + cur_bufno * rchan->buf_size;
846 write_buf_end(rchan) = write_buf(rchan) + rchan->buf_size;
847 write_limit(rchan) = write_buf_end(rchan) - rchan->end_reserve;
/* Lockless scheme: rescale the packed index for the new bufno width. */
849 idx(rchan) &= idx_mask(rchan);
850 bufno_bits(rchan) += rchan->resize_order;
852 (1UL << (bufno_bits(rchan) + offset_bits(rchan))) - 1;
857 * do_replace_buffer - does the work of channel buffer replacement
858 * @rchan: the channel
859 * @newsize: new channel buffer size
860 * @oldsize: old channel buffer size
861 * @old_n_bufs: old channel sub-buffer count
863 * Returns 0 if replacement happened, 1 if canceled
865 * Does the work of switching buffers and fixing everything up
866 * so the channel can continue with a new size.
869 do_replace_buffer(struct rchan *rchan,
878 cur_idx = relay_get_offset(rchan, NULL);
/* Abort if the writer state has invalidated the staged buffer. */
880 if (newsize > oldsize)
881 canceled = expand_cancel_check(rchan);
883 canceled = shrink_cancel_check(rchan, newsize);
890 switch_rchan_buf(rchan, newsize, oldsize, old_nbufs, cur_idx);
/* Shift open readers' positions to match the relocated data. */
892 if (rchan->resize_offset.delta)
893 update_file_offsets(rchan);
/* A channel suspended for lack of space can resume on the new buffer. */
895 atomic_set(&rchan->suspended, 0);
/* Old page array is kept for deferred freeing; the resize array becomes
 * the live one, and all resize-in-progress state is cleared. */
897 rchan->old_buf_page_array = rchan->buf_page_array;
898 rchan->buf_page_array = rchan->resize_page_array;
899 rchan->buf_page_count = rchan->resize_page_count;
900 rchan->resize_page_array = NULL;
901 rchan->resize_page_count = 0;
902 rchan->resize_buf = NULL;
903 rchan->resize_buf_size = 0;
904 rchan->resize_alloc_size = 0;
905 rchan->resize_n_bufs = 0;
906 rchan->resize_err = 0;
907 rchan->resize_order = 0;
909 rchan->callbacks->needs_resize(rchan->id,
910 RELAY_RESIZE_REPLACED,
917 * add_free_page_array - add a page_array to be freed
918 * @free_rchan_buf: the free_rchan_buf struct
919 * @page_array: the page array to free
920 * @page_count: the number of pages to free, 0 to free the array only
922 * Internal - Used add page_arrays to be freed asynchronously.
925 add_free_page_array(struct free_rchan_buf *free_rchan_buf,
926 struct page **page_array, int page_count)
/* Append to the next free slot; relaybuf_free walks up to 3 entries. */
928 int cur = free_rchan_buf->cur++;
930 free_rchan_buf->page_array[cur].array = page_array;
931 free_rchan_buf->page_array[cur].count = page_count;
935 * free_replaced_buffer - free a channel's old buffer
936 * @rchan: the channel
937 * @oldbuf: the old buffer
938 * @oldsize: old buffer size
940 * Frees a channel buffer via work queue.
943 free_replaced_buffer(struct rchan *rchan, char *oldbuf, int oldsize)
945 struct free_rchan_buf *free_buf;
/* GFP_ATOMIC: presumably callable from a context that can't sleep --
 * confirm against callers in the full source. */
947 free_buf = kmalloc(sizeof(struct free_rchan_buf), GFP_ATOMIC);
950 memset(free_buf, 0, sizeof(struct free_rchan_buf));
952 free_buf->unmap_buf = oldbuf;
/* Counts of 0: free the slot arrays only -- the old-buffer and expand
 * pages now belong to the new live buffer.  Shrink pages are the ones
 * dropped from the buffer, so they are truly freed (nonzero count). */
953 add_free_page_array(free_buf, rchan->old_buf_page_array, 0);
954 rchan->old_buf_page_array = NULL;
955 add_free_page_array(free_buf, rchan->expand_page_array, 0);
956 add_free_page_array(free_buf, rchan->shrink_page_array, rchan->shrink_page_count);
958 rchan->expand_page_array = NULL;
959 rchan->expand_page_count = 0;
960 rchan->shrink_page_array = NULL;
961 rchan->shrink_page_count = 0;
/* Defer the actual unmap/free to process context via the work queue. */
963 INIT_WORK(&free_buf->work, relaybuf_free, free_buf);
964 schedule_delayed_work(&free_buf->work, 1);
970 * free_canceled_resize - free buffers allocated for a canceled resize
971 * @rchan: the channel
973 * Frees canceled buffers via work queue.
976 free_canceled_resize(struct rchan *rchan)
978 struct free_rchan_buf *free_buf;
980 free_buf = kmalloc(sizeof(struct free_rchan_buf), GFP_ATOMIC);
983 memset(free_buf, 0, sizeof(struct free_rchan_buf));
/* Canceled expand: the new pages were never used, free them outright.
 * Canceled shrink: the pages still belong to the live buffer, so only
 * the slot array goes (count 0). */
985 if (rchan->resize_alloc_size > rchan->alloc_size)
986 add_free_page_array(free_buf, rchan->expand_page_array, rchan->expand_page_count);
988 add_free_page_array(free_buf, rchan->shrink_page_array, 0);
990 add_free_page_array(free_buf, rchan->resize_page_array, 0);
/* The staged mapping is unmapped in relaybuf_free. */
991 free_buf->unmap_buf = rchan->resize_buf;
993 rchan->expand_page_array = NULL;
994 rchan->expand_page_count = 0;
995 rchan->shrink_page_array = NULL;
996 rchan->shrink_page_count = 0;
997 rchan->resize_page_array = NULL;
998 rchan->resize_page_count = 0;
999 rchan->resize_buf = NULL;
1001 INIT_WORK(&free_buf->work, relaybuf_free, free_buf);
1002 schedule_delayed_work(&free_buf->work, 1);
1008 * __relay_replace_buffer - replace channel buffer with new buffer
1009 * @rchan: the channel
1011 * Internal - see relay_replace_buffer() for details.
1013 * Returns 0 if successful, negative otherwise.
1016 __relay_replace_buffer(struct rchan *rchan)
/* Serialize against concurrent resizes without blocking. */
1022 if (down_trylock(&rchan->resize_sem))
1025 if (rchan->init_buf) {
/* Nothing staged to replace with. */
1030 if (!rchan->replace_buffer)
1033 if (rchan->resizing) {
1038 if (rchan->resize_buf == NULL) {
1043 oldbuf = rchan->buf;
1044 oldsize = rchan->alloc_size;
/* do_replace_buffer returns 0 on success, nonzero if canceled; free the
 * appropriate set of buffers either way. */
1046 err = do_replace_buffer(rchan, rchan->resize_alloc_size,
1047 oldsize, rchan->n_bufs);
1049 err = free_replaced_buffer(rchan, oldbuf, oldsize);
1051 err = free_canceled_resize(rchan);
1053 rchan->replace_buffer = 0;
1054 up(&rchan->resize_sem);
1060 * relay_replace_buffer - replace channel buffer with new buffer
1061 * @rchan_id: the channel id
1063 * Replaces the current channel buffer with the new buffer allocated
1064 * by relay_alloc_buffer and contained in the channel struct. When the
1065 * replacement is complete, the needs_resize() callback is called with
1066 * RELAY_RESIZE_REPLACED.
1068 * Returns 0 on success, or errcode if the channel is busy or if
1069 * the replacement or previous allocation didn't happen for some reason.
1072 relay_replace_buffer(int rchan_id)
1076 struct rchan *rchan;
/* Resolve the id to a channel ref (error check elided in this excerpt),
 * then delegate to the internal worker. */
1078 rchan = rchan_get(rchan_id);
1082 err = __relay_replace_buffer(rchan);
/* Public resize API exported to modules. */
1089 EXPORT_SYMBOL(relay_realloc_buffer);
1090 EXPORT_SYMBOL(relay_replace_buffer);