commit b0d8d22921
The pipe code was trying (and failing) to be very careful about freeing
the pipe info only after the last access, with a pattern like:
	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);
	__pipe_unlock(pipe);
	if (kill)
		free_pipe_info(pipe);
where the final freeing is done last.
HOWEVER. The above is actually broken, because while the freeing is
done at the end, if we have two racing processes releasing the pipe
inode info, the one that *doesn't* free it will decrement the ->files
count, and unlock the inode i_lock, but then still use the
"pipe_inode_info" afterwards when it does the "__pipe_unlock(pipe)".
This is *very* hard to trigger in practice, since the race window is
very small, and adding debug options seems to just hide it by slowing
things down.
Simon originally reported this way back in July as an Oops in
kmem_cache_allocate due to a single-bit corruption (the final
"spin_unlock(pipe->mutex.wait_lock)" incremented a field in a different
allocation that had re-used the freed pipe-info); it has taken this long
to figure out.
Since the 'pipe->files' accesses aren't even protected by the pipe lock
(we very much use the inode lock for that), the simple solution is to
just drop the pipe lock early. And since there were two users of this
pattern, create a helper function for it.
Introduced by commit ba5bb14733 ("pipe: take allocation and freeing of
pipe_inode_info out of ->i_mutex").
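The corrected ordering, as a minimal sketch (matching the put_pipe_info()
helper in the file below; "kill" starts at 0): drop the pipe mutex first,
then do the ->files accounting under i_lock, and free last:

	__pipe_unlock(pipe);		/* done with the pipe mutex first */

	spin_lock(&inode->i_lock);	/* ->files is protected by i_lock only */
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);	/* no path touches "pipe" after this */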
Reported-by: Simon Kirby <sim@hostway.ca>
Reported-by: Ian Applegate <ia@cloudflare.com>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
Cc: stable@kernel.org # v3.10+
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1326 lines
29 KiB
C
/*
 * linux/fs/pipe.c
 *
 * Copyright (C) 1991, 1992, 1999 Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/aio.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

/*
 * Minimum pipe size, as required by POSIX
 */
unsigned int pipe_min_size = PAGE_SIZE;

/*
 * We use a start+len construction, which provides full use of the
 * allocated memory.
 * -- Florian Coosmann (FGC)
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}

/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
	pipe_unlock(pipe);
	schedule();
	finish_wait(&pipe->wait, &wait);
	pipe_lock(pipe);
}

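/*
 * Copy data between a pipe buffer and a user iovec. With @atomic set
 * the caller holds a kmap_atomic() mapping and must not fault, so only
 * the _inatomic copy variants are used; when such a copy fails, the
 * callers below retry once with atomic == 0 and a regular kmap().
 */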
static int
pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
			int atomic)
{
	unsigned long copy;

	while (len > 0) {
		while (!iov->iov_len)
			iov++;
		copy = min_t(unsigned long, len, iov->iov_len);

		if (atomic) {
			if (__copy_from_user_inatomic(to, iov->iov_base, copy))
				return -EFAULT;
		} else {
			if (copy_from_user(to, iov->iov_base, copy))
				return -EFAULT;
		}
		to += copy;
		len -= copy;
		iov->iov_base += copy;
		iov->iov_len -= copy;
	}
	return 0;
}

static int
pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len,
		      int atomic)
{
	unsigned long copy;

	while (len > 0) {
		while (!iov->iov_len)
			iov++;
		copy = min_t(unsigned long, len, iov->iov_len);

		if (atomic) {
			if (__copy_to_user_inatomic(iov->iov_base, from, copy))
				return -EFAULT;
		} else {
			if (copy_to_user(iov->iov_base, from, copy))
				return -EFAULT;
		}
		from += copy;
		len -= copy;
		iov->iov_base += copy;
		iov->iov_len -= copy;
	}
	return 0;
}

/*
 * Attempt to pre-fault in the user memory, so we can use atomic copies.
 * Returns the number of bytes not faulted in.
 */
static int iov_fault_in_pages_write(struct iovec *iov, unsigned long len)
{
	while (!iov->iov_len)
		iov++;

	while (len > 0) {
		unsigned long this_len;

		this_len = min_t(unsigned long, len, iov->iov_len);
		if (fault_in_pages_writeable(iov->iov_base, this_len))
			break;

		len -= this_len;
		iov++;
	}

	return len;
}

/*
 * Pre-fault in the user memory, so we can use atomic copies.
 */
static void iov_fault_in_pages_read(struct iovec *iov, unsigned long len)
{
	while (!iov->iov_len)
		iov++;

	while (len > 0) {
		unsigned long this_len;

		this_len = min_t(unsigned long, len, iov->iov_len);
		fault_in_pages_readable(iov->iov_base, this_len);
		len -= this_len;
		iov++;
	}
}

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		page_cache_release(page);
}

/**
 * generic_pipe_buf_map - virtually map a pipe buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer that should be mapped
 * @atomic:	whether to use an atomic map
 *
 * Description:
 *	This function returns a kernel virtual address mapping for the
 *	pipe_buffer passed in @buf. If @atomic is set, an atomic map is provided
 *	and the caller has to be careful not to fault before calling
 *	the unmap function.
 *
 *	Note that this function calls kmap_atomic() if @atomic != 0.
 */
void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf, int atomic)
{
	if (atomic) {
		buf->flags |= PIPE_BUF_FLAG_ATOMIC;
		return kmap_atomic(buf->page);
	}

	return kmap(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_map);

/**
 * generic_pipe_buf_unmap - unmap a previously mapped pipe buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer that should be unmapped
 * @map_data:	the data that the mapping function returned
 *
 * Description:
 *	This function undoes the mapping that ->map() provided.
 */
void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
			    struct pipe_buffer *buf, void *map_data)
{
	if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
		buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
		kunmap_atomic(map_data);
	} else
		kunmap(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_unmap);

/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns 0 and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	he wishes; the typical use is insertion into a different file
 *	page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	page_cache_get(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info:	the pipe that the buffer belongs to
 * @buf:	the buffer to confirm
 *
 * Description:
 *	This function does nothing, because the generic pipe code uses
 *	pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
			     struct pipe_buffer *buf)
{
	return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.can_merge = 1,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations packet_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

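/*
 * Read side of the pipe. Data is copied out of the circular array of
 * pipe_buffers starting at pipe->curbuf, and each buffer is released
 * once it is fully drained. With no data but a live writer, the reader
 * either returns -EAGAIN (O_NONBLOCK) or sleeps in pipe_wait().
 */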
static ssize_t
pipe_read(struct kiocb *iocb, const struct iovec *_iov,
	  unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	int do_wakeup;
	ssize_t ret;
	struct iovec *iov = (struct iovec *)_iov;
	size_t total_len;

	total_len = iov_length(iov, nr_segs);
	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	__pipe_lock(pipe);
	for (;;) {
		int bufs = pipe->nrbufs;
		if (bufs) {
			int curbuf = pipe->curbuf;
			struct pipe_buffer *buf = pipe->bufs + curbuf;
			const struct pipe_buf_operations *ops = buf->ops;
			void *addr;
			size_t chars = buf->len;
			int error, atomic;

			if (chars > total_len)
				chars = total_len;

			error = ops->confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			atomic = !iov_fault_in_pages_write(iov, chars);
redo:
			addr = ops->map(pipe, buf, atomic);
			error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
			ops->unmap(pipe, buf, addr);
			if (unlikely(error)) {
				/*
				 * Just retry with the slow path if we failed.
				 */
				if (atomic) {
					atomic = 0;
					goto redo;
				}
				if (!ret)
					ret = error;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				curbuf = (curbuf + 1) & (pipe->buffers - 1);
				pipe->curbuf = curbuf;
				pipe->nrbufs = --bufs;
				do_wakeup = 1;
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
		}
		if (bufs)	/* More to do? */
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}
		pipe_wait(pipe);
	}
	__pipe_unlock(pipe);

	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}

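/*
 * Write side of the pipe. A small write whose tail would still fit in
 * the last partially-filled buffer is appended there when the buffer's
 * ->can_merge allows it (packetized O_DIRECT pipes never merge); all
 * remaining data goes into freshly allocated pages, one pipe_buffer
 * per page.
 */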
static ssize_t
pipe_write(struct kiocb *iocb, const struct iovec *_iov,
	   unsigned long nr_segs, loff_t ppos)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	ssize_t ret;
	int do_wakeup;
	struct iovec *iov = (struct iovec *)_iov;
	size_t total_len;
	ssize_t chars;

	total_len = iov_length(iov, nr_segs);
	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* We try to merge small writes */
	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
	if (pipe->nrbufs && chars != 0) {
		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
							(pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + lastbuf;
		const struct pipe_buf_operations *ops = buf->ops;
		int offset = buf->offset + buf->len;

		if (ops->can_merge && offset + chars <= PAGE_SIZE) {
			int error, atomic = 1;
			void *addr;

			error = ops->confirm(pipe, buf);
			if (error)
				goto out;

			iov_fault_in_pages_read(iov, chars);
redo1:
			addr = ops->map(pipe, buf, atomic);
			error = pipe_iov_copy_from_user(offset + addr, iov,
							chars, atomic);
			ops->unmap(pipe, buf, addr);
			ret = error;
			do_wakeup = 1;
			if (error) {
				if (atomic) {
					atomic = 0;
					goto redo1;
				}
				goto out;
			}
			buf->len += chars;
			total_len -= chars;
			ret = chars;
			if (!total_len)
				goto out;
		}
	}

	for (;;) {
		int bufs;

		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		bufs = pipe->nrbufs;
		if (bufs < pipe->buffers) {
			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pipe->tmp_page;
			char *src;
			int error, atomic = 1;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}
			/* Always wake up, even if the copy fails. Otherwise
			 * we lock up (O_NONBLOCK-)readers that sleep due to
			 * syscall merging.
			 * FIXME! Is this really true?
			 */
			do_wakeup = 1;
			chars = PAGE_SIZE;
			if (chars > total_len)
				chars = total_len;

			iov_fault_in_pages_read(iov, chars);
redo2:
			if (atomic)
				src = kmap_atomic(page);
			else
				src = kmap(page);

			error = pipe_iov_copy_from_user(src, iov, chars,
							atomic);
			if (atomic)
				kunmap_atomic(src);
			else
				kunmap(page);

			if (unlikely(error)) {
				if (atomic) {
					atomic = 0;
					goto redo2;
				}
				if (!ret)
					ret = error;
				break;
			}
			ret += chars;

			/* Insert it into the buffer array */
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = chars;
			buf->flags = 0;
			if (is_packetized(filp)) {
				buf->ops = &packet_pipe_buf_ops;
				buf->flags = PIPE_BUF_FLAG_PACKET;
			}
			pipe->nrbufs = ++bufs;
			pipe->tmp_page = NULL;

			total_len -= chars;
			if (!total_len)
				break;
		}
		if (bufs < pipe->buffers)
			continue;
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}
out:
	__pipe_unlock(pipe);
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (ret > 0) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
	}
	return ret;
}

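/*
 * FIONREAD reports how many bytes are currently queued in the pipe,
 * summed over all in-flight pipe_buffers while holding the pipe mutex.
 */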
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, buf, nrbufs;

	switch (cmd) {
	case FIONREAD:
		__pipe_lock(pipe);
		count = 0;
		buf = pipe->curbuf;
		nrbufs = pipe->nrbufs;
		while (--nrbufs >= 0) {
			count += pipe->bufs[buf].len;
			buf = (buf+1) & (pipe->buffers - 1);
		}
		__pipe_unlock(pipe);

		return put_user(count, (int __user *)arg);
	default:
		return -ENOIOCTLCMD;
	}
}

/* No kernel lock held - fine */
static unsigned int
pipe_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask;
	struct pipe_inode_info *pipe = filp->private_data;
	int nrbufs;

	poll_wait(filp, &pipe->wait, wait);

	/* Reading only -- no need for acquiring the semaphore. */
	nrbufs = pipe->nrbufs;
	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= POLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
		/*
		 * Most Unices do not set POLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= POLLERR;
	}

	return mask;
}

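/*
 * Drop one file reference on the inode's pipe_inode_info, and free it
 * when the last reference goes away. The caller must have dropped the
 * pipe mutex already: ->files is protected by inode->i_lock alone, and
 * freeing only here, after __pipe_unlock(), is what closes the
 * use-after-free race described in the commit message above.
 */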
static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	if (pipe->readers || pipe->writers) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}

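/*
 * Allocate a pipe_inode_info and its initial ring of PIPE_DEF_BUFFERS
 * buffer slots. ->files is left at zero here; callers account for the
 * file references once the pipe is attached to an inode.
 */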
struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
	if (pipe) {
		pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * PIPE_DEF_BUFFERS, GFP_KERNEL);
		if (pipe->bufs) {
			init_waitqueue_head(&pipe->wait);
			pipe->r_counter = pipe->w_counter = 1;
			pipe->buffers = PIPE_DEF_BUFFERS;
			mutex_init(&pipe->mutex);
			return pipe;
		}
		kfree(pipe);
	}

	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	for (i = 0; i < pipe->buffers; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			buf->ops->release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				dentry->d_inode->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

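/*
 * Build the anonymous pipefs inode used by pipe(2)/pipe2(2). Both file
 * descriptors share this one inode, hence pipe->files = 2 and one
 * reader plus one writer right from the start.
 */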
static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
	int err;
	struct inode *inode = get_pipe_inode();
	struct file *f;
	struct path path;
	static struct qstr name = { .name = "" };

	if (!inode)
		return -ENFILE;

	err = -ENOMEM;
	path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
	if (!path.dentry)
		goto err_inode;
	path.mnt = mntget(pipe_mnt);

	d_instantiate(path.dentry, inode);

	err = -ENFILE;
	f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops);
	if (IS_ERR(f))
		goto err_dentry;

	f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
	f->private_data = inode->i_pipe;

	res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops);
	if (IS_ERR(res[0]))
		goto err_file;

	path_get(&path);
	res[0]->private_data = inode->i_pipe;
	res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK);
	res[1] = f;
	return 0;

err_file:
	put_filp(f);
err_dentry:
	free_pipe_info(inode->i_pipe);
	path_put(&path);
	return err;

err_inode:
	free_pipe_info(inode->i_pipe);
	iput(inode);
	return err;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

err_fdr:
	put_unused_fd(fdr);
err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
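/*
 * For reference, the userspace view of these two system calls
 * (a minimal sketch; error handling elided):
 *
 *	char buf[1];
 *	int fd[2];
 *	if (pipe2(fd, O_NONBLOCK | O_CLOEXEC) == 0) {
 *		write(fd[1], "x", 1);	-- fd[1] is the write end
 *		read(fd[0], buf, 1);	-- fd[0] is the read end
 *	}
 */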
SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return sys_pipe2(fildes, 0);
}

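/*
 * Sleep until the partner counter changes, i.e. until at least one
 * opener of the other kind has shown up; returns -ERESTARTSYS if the
 * wait was interrupted by a signal instead.
 */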
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	int cur = *cnt;

	while (cur == *cnt) {
		pipe_wait(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible(&pipe->wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	filp->f_mode &= (FMODE_READ | FMODE_WRITE);

	switch (filp->f_mode) {
	case FMODE_READ:
	/*
	 * O_RDONLY
	 * POSIX.1 says that O_NONBLOCK means return with the FIFO
	 * opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress POLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 * O_WRONLY
	 * POSIX.1 says that O_NONBLOCK means return -1 with
	 * errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 * O_RDWR
	 * POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 * This implementation will NEVER block on a O_RDWR open, since
	 * the process can at least talk to itself.
	 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}

const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= pipe_read,
	.write		= do_sync_write,
	.aio_write	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
};

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns
 * the pipe size if successful, or -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
{
	struct pipe_buffer *bufs;

	/*
	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
	 * expect a lot of shrink+grow operations, just free and allocate
	 * again like we would do for growing. If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	if (nr_pages < pipe->nrbufs)
		return -EBUSY;

	bufs = kcalloc(nr_pages, sizeof(*bufs), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!bufs))
		return -ENOMEM;

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indexes.
	 */
	if (pipe->nrbufs) {
		unsigned int tail;
		unsigned int head;

		tail = pipe->curbuf + pipe->nrbufs;
		if (tail < pipe->buffers)
			tail = 0;
		else
			tail &= (pipe->buffers - 1);

		head = pipe->nrbufs - tail;
		if (head)
			memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
		if (tail)
			memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
	}

	pipe->curbuf = 0;
	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->buffers = nr_pages;
	return nr_pages * PAGE_SIZE;
}

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages.
 */
static inline unsigned int round_pipe_size(unsigned int size)
{
	unsigned long nr_pages;

	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
}

/*
 * This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax
 * will return an error.
 */
int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
		 size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
	if (ret < 0 || !write)
		return ret;

	pipe_max_size = round_pipe_size(pipe_max_size);
	return ret;
}

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
	return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}

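/*
 * fcntl(fd, F_SETPIPE_SZ, size) rounds the request up to a
 * power-of-two number of pages; unprivileged callers are capped at
 * pipe_max_size. F_GETPIPE_SZ reports the current capacity in bytes.
 */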
long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ: {
		unsigned int size, nr_pages;

		size = round_pipe_size(arg);
		nr_pages = size >> PAGE_SHIFT;

		ret = -EINVAL;
		if (!nr_pages)
			goto out;

		if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
			ret = -EPERM;
			goto out;
		}
		ret = pipe_set_size(pipe, nr_pages);
		break;
		}
	case F_GETPIPE_SZ:
		ret = pipe->buffers * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	__pipe_unlock(pipe);
	return ret;
}

static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */
static struct dentry *pipefs_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
			&pipefs_dentry_operations, PIPEFS_MAGIC);
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.mount		= pipefs_mount,
	.kill_sb	= kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);