FreeBSD kernel kern code
vfs_aio.c
Go to the documentation of this file.
1 /*-
2  * Copyright (c) 1997 John S. Dyson. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  * notice, this list of conditions and the following disclaimer.
9  * 2. John S. Dyson's name may not be used to endorse or promote products
10  * derived from this software without specific prior written permission.
11  *
12  * DISCLAIMER: This code isn't warranted to do anything useful. Anything
13  * bad that happens because of using this software isn't the responsibility
14  * of the author. This software is distributed AS-IS.
15  */
16 
17 /*
18  * This file contains support for the POSIX 1003.1B AIO/LIO facility.
19  */
20 
21 #include <sys/cdefs.h>
22 __FBSDID("$BSDSUniX$");
23 
24 #include "opt_compat.h"
25 
26 #include <sys/param.h>
27 #include <sys/systm.h>
28 #include <sys/malloc.h>
29 #include <sys/bio.h>
30 #include <sys/buf.h>
31 #include <sys/capability.h>
32 #include <sys/eventhandler.h>
33 #include <sys/sysproto.h>
34 #include <sys/filedesc.h>
35 #include <sys/kernel.h>
36 #include <sys/module.h>
37 #include <sys/kthread.h>
38 #include <sys/fcntl.h>
39 #include <sys/file.h>
40 #include <sys/limits.h>
41 #include <sys/lock.h>
42 #include <sys/mutex.h>
43 #include <sys/unistd.h>
44 #include <sys/posix4.h>
45 #include <sys/proc.h>
46 #include <sys/resourcevar.h>
47 #include <sys/signalvar.h>
48 #include <sys/protosw.h>
49 #include <sys/sema.h>
50 #include <sys/socket.h>
51 #include <sys/socketvar.h>
52 #include <sys/syscall.h>
53 #include <sys/sysent.h>
54 #include <sys/sysctl.h>
55 #include <sys/sx.h>
56 #include <sys/taskqueue.h>
57 #include <sys/vnode.h>
58 #include <sys/conf.h>
59 #include <sys/event.h>
60 #include <sys/mount.h>
61 
62 #include <machine/atomic.h>
63 
64 #include <vm/vm.h>
65 #include <vm/vm_extern.h>
66 #include <vm/pmap.h>
67 #include <vm/vm_map.h>
68 #include <vm/vm_object.h>
69 #include <vm/uma.h>
70 #include <sys/aio.h>
71 
72 #include "opt_vfs_aio.h"
73 
74 /*
75  * Counter for allocating reference ids to new jobs. Wrapped to 1 on
76  * overflow. (XXX will be removed soon.)
77  */
78 static u_long jobrefid;
79 
80 /*
81  * Counter for aio_fsync.
82  */
83 static uint64_t jobseqno;
84 
85 #define JOBST_NULL 0
86 #define JOBST_JOBQSOCK 1
87 #define JOBST_JOBQGLOBAL 2
88 #define JOBST_JOBRUNNING 3
89 #define JOBST_JOBFINISHED 4
90 #define JOBST_JOBQBUF 5
91 #define JOBST_JOBQSYNC 6
92 
93 #ifndef MAX_AIO_PER_PROC
94 #define MAX_AIO_PER_PROC 32
95 #endif
96 
97 #ifndef MAX_AIO_QUEUE_PER_PROC
98 #define MAX_AIO_QUEUE_PER_PROC 256 /* Bigger than AIO_LISTIO_MAX */
99 #endif
100 
101 #ifndef MAX_AIO_PROCS
102 #define MAX_AIO_PROCS 32
103 #endif
104 
105 #ifndef MAX_AIO_QUEUE
106 #define MAX_AIO_QUEUE 1024 /* Bigger than AIO_LISTIO_MAX */
107 #endif
108 
109 #ifndef TARGET_AIO_PROCS
110 #define TARGET_AIO_PROCS 4
111 #endif
112 
113 #ifndef MAX_BUF_AIO
114 #define MAX_BUF_AIO 16
115 #endif
116 
117 #ifndef AIOD_TIMEOUT_DEFAULT
118 #define AIOD_TIMEOUT_DEFAULT (10 * hz)
119 #endif
120 
121 #ifndef AIOD_LIFETIME_DEFAULT
122 #define AIOD_LIFETIME_DEFAULT (30 * hz)
123 #endif
124 
125 FEATURE(aio, "Asynchronous I/O");
126 
127 static MALLOC_DEFINE(M_LIO, "lio", "listio aio control block list");
128 
129 static SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "Async IO management");
130 
132 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs,
133  CTLFLAG_RW, &max_aio_procs, 0,
134  "Maximum number of kernel threads to use for handling async IO ");
135 
136 static int num_aio_procs = 0;
137 SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs,
138  CTLFLAG_RD, &num_aio_procs, 0,
139  "Number of presently active kernel threads for async IO");
140 
141 /*
142  * The code will adjust the actual number of AIO processes towards this
143  * number when it gets a chance.
144  */
146 SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
147  0, "Preferred number of ready kernel threads for async IO");
148 
150 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
151  "Maximum number of aio requests to queue, globally");
152 
153 static int num_queue_count = 0;
154 SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
155  "Number of queued aio requests");
156 
157 static int num_buf_aio = 0;
158 SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
159  "Number of aio requests presently handled by the buf subsystem");
160 
161 /* Number of async I/O thread in the process of being started */
162 /* XXX This should be local to aio_aqueue() */
163 static int num_aio_resv_start = 0;
164 
165 static int aiod_timeout;
166 SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_timeout, CTLFLAG_RW, &aiod_timeout, 0,
167  "Timeout value for synchronous aio operations");
168 
169 static int aiod_lifetime;
170 SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
171  "Maximum lifetime for idle aiod");
172 
173 static int unloadable = 0;
174 SYSCTL_INT(_vfs_aio, OID_AUTO, unloadable, CTLFLAG_RW, &unloadable, 0,
175  "Allow unload of aio (not recommended)");
176 
177 
179 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
180  0, "Maximum active aio requests per process (stored in the process)");
181 
183 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
185  "Maximum queued aio requests per process (stored in the process)");
186 
188 SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
189  "Maximum buf aio requests per process (stored in the process)");
190 
191 typedef struct oaiocb {
192  int aio_fildes; /* File descriptor */
193  off_t aio_offset; /* File offset for I/O */
194  volatile void *aio_buf; /* I/O buffer in process space */
195  size_t aio_nbytes; /* Number of bytes for I/O */
196  struct osigevent aio_sigevent; /* Signal to deliver */
197  int aio_lio_opcode; /* LIO opcode */
198  int aio_reqprio; /* Request priority -- ignored */
199  struct __aiocb_private _aiocb_private;
200 } oaiocb_t;
201 
202 /*
203  * Below is a key of locks used to protect each member of struct aiocblist
204  * aioliojob and kaioinfo and any backends.
205  *
206  * * - need not protected
207  * a - locked by kaioinfo lock
208  * b - locked by backend lock, the backend lock can be null in some cases,
209  * for example, BIO belongs to this type, in this case, proc lock is
210  * reused.
211  * c - locked by aio_job_mtx, the lock for the generic file I/O backend.
212  */
213 
214 /*
215  * Current, there is only two backends: BIO and generic file I/O.
216  * socket I/O is served by generic file I/O, this is not a good idea, since
217  * disk file I/O and any other types without O_NONBLOCK flag can block daemon
218  * threads, if there is no thread to serve socket I/O, the socket I/O will be
219  * delayed too long or starved, we should create some threads dedicated to
220  * sockets to do non-blocking I/O, same for pipe and fifo, for these I/O
221  * systems we really need non-blocking interface, fiddling O_NONBLOCK in file
222  * structure is not safe because there is race between userland and aio
223  * daemons.
224  */
225 
226 struct aiocblist {
227  TAILQ_ENTRY(aiocblist) list; /* (b) internal list of for backend */
228  TAILQ_ENTRY(aiocblist) plist; /* (a) list of jobs for each backend */
229  TAILQ_ENTRY(aiocblist) allist; /* (a) list of all jobs in proc */
230  int jobflags; /* (a) job flags */
231  int jobstate; /* (b) job state */
232  int inputcharge; /* (*) input blockes */
233  int outputcharge; /* (*) output blockes */
234  struct buf *bp; /* (*) private to BIO backend,
235  * buffer pointer
236  */
237  struct proc *userproc; /* (*) user process */
238  struct ucred *cred; /* (*) active credential when created */
239  struct file *fd_file; /* (*) pointer to file structure */
240  struct aioliojob *lio; /* (*) optional lio job */
241  struct aiocb *uuaiocb; /* (*) pointer in userspace of aiocb */
242  struct knlist klist; /* (a) list of knotes */
243  struct aiocb uaiocb; /* (*) kernel I/O control block */
244  ksiginfo_t ksi; /* (a) realtime signal info */
245  struct task biotask; /* (*) private to BIO backend */
246  uint64_t seqno; /* (*) job number */
247  int pending; /* (a) number of pending I/O, aio_fsync only */
248 };
249 
250 /* jobflags */
251 #define AIOCBLIST_DONE 0x01
252 #define AIOCBLIST_BUFDONE 0x02
253 #define AIOCBLIST_RUNDOWN 0x04
254 #define AIOCBLIST_CHECKSYNC 0x08
255 
256 /*
257  * AIO process info
258  */
259 #define AIOP_FREE 0x1 /* proc on free queue */
260 
262  int aiothreadflags; /* (c) AIO proc flags */
263  TAILQ_ENTRY(aiothreadlist) list; /* (c) list of processes */
264  struct thread *aiothread; /* (*) the AIO thread */
265 };
266 
267 /*
268  * data-structure for lio signal management
269  */
270 struct aioliojob {
271  int lioj_flags; /* (a) listio flags */
272  int lioj_count; /* (a) listio flags */
273  int lioj_finished_count; /* (a) listio flags */
274  struct sigevent lioj_signal; /* (a) signal on all I/O done */
275  TAILQ_ENTRY(aioliojob) lioj_list; /* (a) lio list */
276  struct knlist klist; /* (a) list of knotes */
277  ksiginfo_t lioj_ksi; /* (a) Realtime signal info */
278 };
279 
280 #define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
281 #define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
282 #define LIOJ_KEVENT_POSTED 0x4 /* kevent triggered */
283 
284 /*
285  * per process aio data structure
286  */
287 struct kaioinfo {
288  struct mtx kaio_mtx; /* the lock to protect this struct */
289  int kaio_flags; /* (a) per process kaio flags */
290  int kaio_maxactive_count; /* (*) maximum number of AIOs */
291  int kaio_active_count; /* (c) number of currently used AIOs */
292  int kaio_qallowed_count; /* (*) maxiumu size of AIO queue */
293  int kaio_count; /* (a) size of AIO queue */
294  int kaio_ballowed_count; /* (*) maximum number of buffers */
295  int kaio_buffer_count; /* (a) number of physio buffers */
296  TAILQ_HEAD(,aiocblist) kaio_all; /* (a) all AIOs in the process */
297  TAILQ_HEAD(,aiocblist) kaio_done; /* (a) done queue for process */
298  TAILQ_HEAD(,aioliojob) kaio_liojoblist; /* (a) list of lio jobs */
299  TAILQ_HEAD(,aiocblist) kaio_jobqueue; /* (a) job queue for process */
300  TAILQ_HEAD(,aiocblist) kaio_bufqueue; /* (a) buffer job queue for process */
301  TAILQ_HEAD(,aiocblist) kaio_sockqueue; /* (a) queue for aios waiting on sockets,
302  * NOT USED YET.
303  */
304  TAILQ_HEAD(,aiocblist) kaio_syncqueue; /* (a) queue for aio_fsync */
305  struct task kaio_task; /* (*) task to kick aio threads */
306 };
307 
308 #define AIO_LOCK(ki) mtx_lock(&(ki)->kaio_mtx)
309 #define AIO_UNLOCK(ki) mtx_unlock(&(ki)->kaio_mtx)
310 #define AIO_LOCK_ASSERT(ki, f) mtx_assert(&(ki)->kaio_mtx, (f))
311 #define AIO_MTX(ki) (&(ki)->kaio_mtx)
312 
313 #define KAIO_RUNDOWN 0x1 /* process is being run down */
314 #define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant event */
315 
316 /*
317  * Operations used to interact with userland aio control blocks.
318  * Different ABIs provide their own operations.
319  */
320 struct aiocb_ops {
321  int (*copyin)(struct aiocb *ujob, struct aiocb *kjob);
322  long (*fetch_status)(struct aiocb *ujob);
323  long (*fetch_error)(struct aiocb *ujob);
324  int (*store_status)(struct aiocb *ujob, long status);
325  int (*store_error)(struct aiocb *ujob, long error);
326  int (*store_kernelinfo)(struct aiocb *ujob, long jobref);
327  int (*store_aiocb)(struct aiocb **ujobp, struct aiocb *ujob);
328 };
329 
330 static TAILQ_HEAD(,aiothreadlist) aio_freeproc; /* (c) Idle daemons */
331 static struct sema aio_newproc_sem;
332 static struct mtx aio_job_mtx;
333 static struct mtx aio_sock_mtx;
334 static TAILQ_HEAD(,aiocblist) aio_jobs; /* (c) Async job list */
335 static struct unrhdr *aiod_unr;
336 
337 void aio_init_aioinfo(struct proc *p);
338 static int aio_onceonly(void);
339 static int aio_free_entry(struct aiocblist *aiocbe);
340 static void aio_process(struct aiocblist *aiocbe);
341 static int aio_newproc(int *);
342 int aio_aqueue(struct thread *td, struct aiocb *job,
343  struct aioliojob *lio, int type, struct aiocb_ops *ops);
344 static void aio_physwakeup(struct buf *bp);
345 static void aio_proc_rundown(void *arg, struct proc *p);
346 static void aio_proc_rundown_exec(void *arg, struct proc *p, struct image_params *imgp);
347 static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
348 static void biohelper(void *, int);
349 static void aio_daemon(void *param);
350 static void aio_swake_cb(struct socket *, struct sockbuf *);
351 static int aio_unload(void);
352 static void aio_bio_done_notify(struct proc *userp, struct aiocblist *aiocbe, int type);
353 #define DONE_BUF 1
354 #define DONE_QUEUE 2
355 static int aio_kick(struct proc *userp);
356 static void aio_kick_nowait(struct proc *userp);
357 static void aio_kick_helper(void *context, int pending);
358 static int filt_aioattach(struct knote *kn);
359 static void filt_aiodetach(struct knote *kn);
360 static int filt_aio(struct knote *kn, long hint);
361 static int filt_lioattach(struct knote *kn);
362 static void filt_liodetach(struct knote *kn);
363 static int filt_lio(struct knote *kn, long hint);
364 
365 /*
366  * Zones for:
367  * kaio Per process async io info
368  * aiop async io thread data
369  * aiocb async io jobs
370  * aiol list io job pointer - internal to aio_suspend XXX
371  * aiolio list io jobs
372  */
373 static uma_zone_t kaio_zone, aiop_zone, aiocb_zone, aiol_zone, aiolio_zone;
374 
375 /* kqueue filters for aio */
376 static struct filterops aio_filtops = {
377  .f_isfd = 0,
378  .f_attach = filt_aioattach,
379  .f_detach = filt_aiodetach,
380  .f_event = filt_aio,
381 };
382 static struct filterops lio_filtops = {
383  .f_isfd = 0,
384  .f_attach = filt_lioattach,
385  .f_detach = filt_liodetach,
386  .f_event = filt_lio
387 };
388 
389 static eventhandler_tag exit_tag, exec_tag;
390 
391 TASKQUEUE_DEFINE_THREAD(aiod_bio);
392 
393 /*
394  * Main operations function for use as a kernel module.
395  */
396 static int
397 aio_modload(struct module *module, int cmd, void *arg)
398 {
399  int error = 0;
400 
401  switch (cmd) {
402  case MOD_LOAD:
403  aio_onceonly();
404  break;
405  case MOD_UNLOAD:
406  error = aio_unload();
407  break;
408  case MOD_SHUTDOWN:
409  break;
410  default:
411  error = EINVAL;
412  break;
413  }
414  return (error);
415 }
416 
417 static moduledata_t aio_mod = {
418  "aio",
419  &aio_modload,
420  NULL
421 };
422 
423 static struct syscall_helper_data aio_syscalls[] = {
424  SYSCALL_INIT_HELPER(aio_cancel),
425  SYSCALL_INIT_HELPER(aio_error),
426  SYSCALL_INIT_HELPER(aio_fsync),
427  SYSCALL_INIT_HELPER(aio_read),
428  SYSCALL_INIT_HELPER(aio_return),
429  SYSCALL_INIT_HELPER(aio_suspend),
430  SYSCALL_INIT_HELPER(aio_waitcomplete),
431  SYSCALL_INIT_HELPER(aio_write),
432  SYSCALL_INIT_HELPER(lio_listio),
433  SYSCALL_INIT_HELPER(oaio_read),
434  SYSCALL_INIT_HELPER(oaio_write),
435  SYSCALL_INIT_HELPER(olio_listio),
436  SYSCALL_INIT_LAST
437 };
438 
439 #ifdef COMPAT_32BIT
440 #include <sys/mount.h>
441 #include <sys/socket.h>
442 #include <compat/compat32bit/compat32bit.h>
443 #include <compat/compat32bit/compat32bit_proto.h>
444 #include <compat/compat32bit/compat32bit_signal.h>
445 #include <compat/compat32bit/compat32bit_syscall.h>
446 #include <compat/compat32bit/compat32bit_util.h>
447 
448 static struct syscall_helper_data aio32_syscalls[] = {
449  SYSCALL32_INIT_HELPER(compat32bit_aio_return),
450  SYSCALL32_INIT_HELPER(compat32bit_aio_suspend),
451  SYSCALL32_INIT_HELPER(compat32bit_aio_cancel),
452  SYSCALL32_INIT_HELPER(compat32bit_aio_error),
453  SYSCALL32_INIT_HELPER(compat32bit_aio_fsync),
454  SYSCALL32_INIT_HELPER(compat32bit_aio_read),
455  SYSCALL32_INIT_HELPER(compat32bit_aio_write),
456  SYSCALL32_INIT_HELPER(compat32bit_aio_waitcomplete),
457  SYSCALL32_INIT_HELPER(compat32bit_lio_listio),
458  SYSCALL32_INIT_HELPER(compat32bit_oaio_read),
459  SYSCALL32_INIT_HELPER(compat32bit_oaio_write),
460  SYSCALL32_INIT_HELPER(compat32bit_olio_listio),
461  SYSCALL_INIT_LAST
462 };
463 #endif
464 
465 DECLARE_MODULE(aio, aio_mod,
466  SI_SUB_VFS, SI_ORDER_ANY);
467 MODULE_VERSION(aio, 1);
468 
469 /*
470  * Startup initialization
471  */
472 static int
474 {
475  int error;
476 
477  /* XXX: should probably just use so->callback */
479  exit_tag = EVENTHANDLER_REGISTER(process_exit, aio_proc_rundown, NULL,
480  EVENTHANDLER_PRI_ANY);
481  exec_tag = EVENTHANDLER_REGISTER(process_exec, aio_proc_rundown_exec, NULL,
482  EVENTHANDLER_PRI_ANY);
483  kqueue_add_filteropts(EVFILT_AIO, &aio_filtops);
484  kqueue_add_filteropts(EVFILT_LIO, &lio_filtops);
485  TAILQ_INIT(&aio_freeproc);
486  sema_init(&aio_newproc_sem, 0, "aio_new_proc");
487  mtx_init(&aio_job_mtx, "aio_job", NULL, MTX_DEF);
488  mtx_init(&aio_sock_mtx, "aio_sock", NULL, MTX_DEF);
489  TAILQ_INIT(&aio_jobs);
490  aiod_unr = new_unrhdr(1, INT_MAX, NULL);
491  kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL,
492  NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
493  aiop_zone = uma_zcreate("AIOP", sizeof(struct aiothreadlist), NULL,
494  NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
495  aiocb_zone = uma_zcreate("AIOCB", sizeof(struct aiocblist), NULL, NULL,
496  NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
497  aiol_zone = uma_zcreate("AIOL", AIO_LISTIO_MAX*sizeof(intptr_t) , NULL,
498  NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
499  aiolio_zone = uma_zcreate("AIOLIO", sizeof(struct aioliojob), NULL,
500  NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
503  jobrefid = 1;
504  async_io_version = _POSIX_VERSION;
505  p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, AIO_LISTIO_MAX);
506  p31b_setcfg(CTL_P1003_1B_AIO_MAX, MAX_AIO_QUEUE);
507  p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, 0);
508 
509  error = syscall_helper_register(aio_syscalls);
510  if (error)
511  return (error);
512 #ifdef COMPAT_32BIT
513  error = syscall32_helper_register(aio32_syscalls);
514  if (error)
515  return (error);
516 #endif
517  return (0);
518 }
519 
520 /*
521  * Callback for unload of AIO when used as a module.
522  */
523 static int
525 {
526  int error;
527 
528  /*
529  * XXX: no unloads by default, it's too dangerous.
530  * perhaps we could do it if locked out callers and then
531  * did an aio_proc_rundown() on each process.
532  *
533  * jhb: aio_proc_rundown() needs to run on curproc though,
534  * so I don't think that would fly.
535  */
536  if (!unloadable)
537  return (EOPNOTSUPP);
538 
539 #ifdef COMPAT_32BIT
540  syscall32_helper_unregister(aio32_syscalls);
541 #endif
542  syscall_helper_unregister(aio_syscalls);
543 
544  error = kqueue_del_filteropts(EVFILT_AIO);
545  if (error)
546  return error;
547  error = kqueue_del_filteropts(EVFILT_LIO);
548  if (error)
549  return error;
550  async_io_version = 0;
551  aio_swake = NULL;
552  taskqueue_free(taskqueue_aiod_bio);
553  delete_unrhdr(aiod_unr);
554  uma_zdestroy(kaio_zone);
555  uma_zdestroy(aiop_zone);
556  uma_zdestroy(aiocb_zone);
557  uma_zdestroy(aiol_zone);
558  uma_zdestroy(aiolio_zone);
559  EVENTHANDLER_DEREGISTER(process_exit, exit_tag);
560  EVENTHANDLER_DEREGISTER(process_exec, exec_tag);
561  mtx_destroy(&aio_job_mtx);
562  mtx_destroy(&aio_sock_mtx);
563  sema_destroy(&aio_newproc_sem);
564  p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, -1);
565  p31b_setcfg(CTL_P1003_1B_AIO_MAX, -1);
566  p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, -1);
567  return (0);
568 }
569 
570 /*
571  * Init the per-process aioinfo structure. The aioinfo limits are set
572  * per-process for user limit (resource) management.
573  */
574 void
575 aio_init_aioinfo(struct proc *p)
576 {
577  struct kaioinfo *ki;
578 
579  ki = uma_zalloc(kaio_zone, M_WAITOK);
580  mtx_init(&ki->kaio_mtx, "aiomtx", NULL, MTX_DEF);
581  ki->kaio_flags = 0;
583  ki->kaio_active_count = 0;
585  ki->kaio_count = 0;
587  ki->kaio_buffer_count = 0;
588  TAILQ_INIT(&ki->kaio_all);
589  TAILQ_INIT(&ki->kaio_done);
590  TAILQ_INIT(&ki->kaio_jobqueue);
591  TAILQ_INIT(&ki->kaio_bufqueue);
592  TAILQ_INIT(&ki->kaio_liojoblist);
593  TAILQ_INIT(&ki->kaio_sockqueue);
594  TAILQ_INIT(&ki->kaio_syncqueue);
595  TASK_INIT(&ki->kaio_task, 0, aio_kick_helper, p);
596  PROC_LOCK(p);
597  if (p->p_aioinfo == NULL) {
598  p->p_aioinfo = ki;
599  PROC_UNLOCK(p);
600  } else {
601  PROC_UNLOCK(p);
602  mtx_destroy(&ki->kaio_mtx);
603  uma_zfree(kaio_zone, ki);
604  }
605 
607  aio_newproc(NULL);
608 }
609 
610 static int
611 aio_sendsig(struct proc *p, struct sigevent *sigev, ksiginfo_t *ksi)
612 {
613  struct thread *td;
614  int error;
615 
616  error = sigev_findtd(p, sigev, &td);
617  if (error)
618  return (error);
619  if (!KSI_ONQ(ksi)) {
620  ksiginfo_set_sigev(ksi, sigev);
621  ksi->ksi_code = SI_ASYNCIO;
622  ksi->ksi_flags |= KSI_EXT | KSI_INS;
623  tdsendsignal(p, td, ksi->ksi_signo, ksi);
624  }
625  PROC_UNLOCK(p);
626  return (error);
627 }
628 
629 /*
630  * Free a job entry. Wait for completion if it is currently active, but don't
631  * delay forever. If we delay, we return a flag that says that we have to
632  * restart the queue scan.
633  */
634 static int
635 aio_free_entry(struct aiocblist *aiocbe)
636 {
637  struct kaioinfo *ki;
638  struct aioliojob *lj;
639  struct proc *p;
640 
641  p = aiocbe->userproc;
642  MPASS(curproc == p);
643  ki = p->p_aioinfo;
644  MPASS(ki != NULL);
645 
646  AIO_LOCK_ASSERT(ki, MA_OWNED);
647  MPASS(aiocbe->jobstate == JOBST_JOBFINISHED);
648 
649  atomic_subtract_int(&num_queue_count, 1);
650 
651  ki->kaio_count--;
652  MPASS(ki->kaio_count >= 0);
653 
654  TAILQ_REMOVE(&ki->kaio_done, aiocbe, plist);
655  TAILQ_REMOVE(&ki->kaio_all, aiocbe, allist);
656 
657  lj = aiocbe->lio;
658  if (lj) {
659  lj->lioj_count--;
660  lj->lioj_finished_count--;
661 
662  if (lj->lioj_count == 0) {
663  TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
664  /* lio is going away, we need to destroy any knotes */
665  knlist_delete(&lj->klist, curthread, 1);
666  PROC_LOCK(p);
667  sigqueue_take(&lj->lioj_ksi);
668  PROC_UNLOCK(p);
669  uma_zfree(aiolio_zone, lj);
670  }
671  }
672 
673  /* aiocbe is going away, we need to destroy any knotes */
674  knlist_delete(&aiocbe->klist, curthread, 1);
675  PROC_LOCK(p);
676  sigqueue_take(&aiocbe->ksi);
677  PROC_UNLOCK(p);
678 
679  MPASS(aiocbe->bp == NULL);
680  aiocbe->jobstate = JOBST_NULL;
681  AIO_UNLOCK(ki);
682 
683  /*
684  * The thread argument here is used to find the owning process
685  * and is also passed to fo_close() which may pass it to various
686  * places such as devsw close() routines. Because of that, we
687  * need a thread pointer from the process owning the job that is
688  * persistent and won't disappear out from under us or move to
689  * another process.
690  *
691  * Currently, all the callers of this function call it to remove
692  * an aiocblist from the current process' job list either via a
693  * syscall or due to the current process calling exit() or
694  * execve(). Thus, we know that p == curproc. We also know that
695  * curthread can't exit since we are curthread.
696  *
697  * Therefore, we use curthread as the thread to pass to
698  * knlist_delete(). This does mean that it is possible for the
699  * thread pointer at close time to differ from the thread pointer
700  * at open time, but this is already true of file descriptors in
701  * a multithreaded process.
702  */
703  fdrop(aiocbe->fd_file, curthread);
704  crfree(aiocbe->cred);
705  uma_zfree(aiocb_zone, aiocbe);
706  AIO_LOCK(ki);
707 
708  return (0);
709 }
710 
711 static void
712 aio_proc_rundown_exec(void *arg, struct proc *p, struct image_params *imgp __unused)
713 {
714  aio_proc_rundown(arg, p);
715 }
716 
717 /*
718  * Rundown the jobs for a given process.
719  */
720 static void
721 aio_proc_rundown(void *arg, struct proc *p)
722 {
723  struct kaioinfo *ki;
724  struct aioliojob *lj;
725  struct aiocblist *cbe, *cbn;
726  struct file *fp;
727  struct socket *so;
728  int remove;
729 
730  KASSERT(curthread->td_proc == p,
731  ("%s: called on non-curproc", __func__));
732  ki = p->p_aioinfo;
733  if (ki == NULL)
734  return;
735 
736  AIO_LOCK(ki);
737  ki->kaio_flags |= KAIO_RUNDOWN;
738 
739 restart:
740 
741  /*
742  * Try to cancel all pending requests. This code simulates
743  * aio_cancel on all pending I/O requests.
744  */
745  TAILQ_FOREACH_SAFE(cbe, &ki->kaio_jobqueue, plist, cbn) {
746  remove = 0;
747  mtx_lock(&aio_job_mtx);
748  if (cbe->jobstate == JOBST_JOBQGLOBAL) {
749  TAILQ_REMOVE(&aio_jobs, cbe, list);
750  remove = 1;
751  } else if (cbe->jobstate == JOBST_JOBQSOCK) {
752  fp = cbe->fd_file;
753  MPASS(fp->f_type == DTYPE_SOCKET);
754  so = fp->f_data;
755  TAILQ_REMOVE(&so->so_aiojobq, cbe, list);
756  remove = 1;
757  } else if (cbe->jobstate == JOBST_JOBQSYNC) {
758  TAILQ_REMOVE(&ki->kaio_syncqueue, cbe, list);
759  remove = 1;
760  }
761  mtx_unlock(&aio_job_mtx);
762 
763  if (remove) {
764  cbe->jobstate = JOBST_JOBFINISHED;
765  cbe->uaiocb._aiocb_private.status = -1;
766  cbe->uaiocb._aiocb_private.error = ECANCELED;
767  TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist);
769  }
770  }
771 
772  /* Wait for all running I/O to be finished */
773  if (TAILQ_FIRST(&ki->kaio_bufqueue) ||
774  TAILQ_FIRST(&ki->kaio_jobqueue)) {
775  ki->kaio_flags |= KAIO_WAKEUP;
776  msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO, "aioprn", hz);
777  goto restart;
778  }
779 
780  /* Free all completed I/O requests. */
781  while ((cbe = TAILQ_FIRST(&ki->kaio_done)) != NULL)
782  aio_free_entry(cbe);
783 
784  while ((lj = TAILQ_FIRST(&ki->kaio_liojoblist)) != NULL) {
785  if (lj->lioj_count == 0) {
786  TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
787  knlist_delete(&lj->klist, curthread, 1);
788  PROC_LOCK(p);
789  sigqueue_take(&lj->lioj_ksi);
790  PROC_UNLOCK(p);
791  uma_zfree(aiolio_zone, lj);
792  } else {
793  panic("LIO job not cleaned up: C:%d, FC:%d\n",
795  }
796  }
797  AIO_UNLOCK(ki);
798  taskqueue_drain(taskqueue_aiod_bio, &ki->kaio_task);
799  mtx_destroy(&ki->kaio_mtx);
800  uma_zfree(kaio_zone, ki);
801  p->p_aioinfo = NULL;
802 }
803 
804 /*
805  * Select a job to run (called by an AIO daemon).
806  */
807 static struct aiocblist *
809 {
810  struct aiocblist *aiocbe;
811  struct kaioinfo *ki;
812  struct proc *userp;
813 
814  mtx_assert(&aio_job_mtx, MA_OWNED);
815  TAILQ_FOREACH(aiocbe, &aio_jobs, list) {
816  userp = aiocbe->userproc;
817  ki = userp->p_aioinfo;
818 
819  if (ki->kaio_active_count < ki->kaio_maxactive_count) {
820  TAILQ_REMOVE(&aio_jobs, aiocbe, list);
821  /* Account for currently active jobs. */
822  ki->kaio_active_count++;
823  aiocbe->jobstate = JOBST_JOBRUNNING;
824  break;
825  }
826  }
827  return (aiocbe);
828 }
829 
830 /*
831  * Move all data to a permanent storage device, this code
832  * simulates fsync syscall.
833  */
834 static int
835 aio_fsync_vnode(struct thread *td, struct vnode *vp)
836 {
837  struct mount *mp;
838  int vfslocked;
839  int error;
840 
841  vfslocked = VFS_LOCK_GIANT(vp->v_mount);
842  if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
843  goto drop;
844  vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
845  if (vp->v_object != NULL) {
846  VM_OBJECT_LOCK(vp->v_object);
847  vm_object_page_clean(vp->v_object, 0, 0, 0);
848  VM_OBJECT_UNLOCK(vp->v_object);
849  }
850  error = VOP_FSYNC(vp, MNT_WAIT, td);
851 
852  VOP_UNLOCK(vp, 0);
853  vn_finished_write(mp);
854 drop:
855  VFS_UNLOCK_GIANT(vfslocked);
856  return (error);
857 }
858 
859 /*
860  * The AIO processing activity. This is the code that does the I/O request for
861  * the non-physio version of the operations. The normal vn operations are used,
862  * and this code should work in all instances for every type of file, including
863  * pipes, sockets, fifos, and regular files.
864  *
865  * XXX I don't think it works well for socket, pipe, and fifo.
866  */
867 static void
868 aio_process(struct aiocblist *aiocbe)
869 {
870  struct ucred *td_savedcred;
871  struct thread *td;
872  struct aiocb *cb;
873  struct file *fp;
874  struct socket *so;
875  struct uio auio;
876  struct iovec aiov;
877  int cnt;
878  int error;
879  int oublock_st, oublock_end;
880  int inblock_st, inblock_end;
881 
882  td = curthread;
883  td_savedcred = td->td_ucred;
884  td->td_ucred = aiocbe->cred;
885  cb = &aiocbe->uaiocb;
886  fp = aiocbe->fd_file;
887 
888  if (cb->aio_lio_opcode == LIO_SYNC) {
889  error = 0;
890  cnt = 0;
891  if (fp->f_vnode != NULL)
892  error = aio_fsync_vnode(td, fp->f_vnode);
893  cb->_aiocb_private.error = error;
894  cb->_aiocb_private.status = 0;
895  td->td_ucred = td_savedcred;
896  return;
897  }
898 
899  aiov.iov_base = (void *)(uintptr_t)cb->aio_buf;
900  aiov.iov_len = cb->aio_nbytes;
901 
902  auio.uio_iov = &aiov;
903  auio.uio_iovcnt = 1;
904  auio.uio_offset = cb->aio_offset;
905  auio.uio_resid = cb->aio_nbytes;
906  cnt = cb->aio_nbytes;
907  auio.uio_segflg = UIO_USERSPACE;
908  auio.uio_td = td;
909 
910  inblock_st = td->td_ru.ru_inblock;
911  oublock_st = td->td_ru.ru_oublock;
912  /*
913  * aio_aqueue() acquires a reference to the file that is
914  * released in aio_free_entry().
915  */
916  if (cb->aio_lio_opcode == LIO_READ) {
917  auio.uio_rw = UIO_READ;
918  if (auio.uio_resid == 0)
919  error = 0;
920  else
921  error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, td);
922  } else {
923  if (fp->f_type == DTYPE_VNODE)
924  bwillwrite();
925  auio.uio_rw = UIO_WRITE;
926  error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, td);
927  }
928  inblock_end = td->td_ru.ru_inblock;
929  oublock_end = td->td_ru.ru_oublock;
930 
931  aiocbe->inputcharge = inblock_end - inblock_st;
932  aiocbe->outputcharge = oublock_end - oublock_st;
933 
934  if ((error) && (auio.uio_resid != cnt)) {
935  if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
936  error = 0;
937  if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE)) {
938  int sigpipe = 1;
939  if (fp->f_type == DTYPE_SOCKET) {
940  so = fp->f_data;
941  if (so->so_options & SO_NOSIGPIPE)
942  sigpipe = 0;
943  }
944  if (sigpipe) {
945  PROC_LOCK(aiocbe->userproc);
946  kern_psignal(aiocbe->userproc, SIGPIPE);
947  PROC_UNLOCK(aiocbe->userproc);
948  }
949  }
950  }
951 
952  cnt -= auio.uio_resid;
953  cb->_aiocb_private.error = error;
954  cb->_aiocb_private.status = cnt;
955  td->td_ucred = td_savedcred;
956 }
957 
958 static void
959 aio_bio_done_notify(struct proc *userp, struct aiocblist *aiocbe, int type)
960 {
961  struct aioliojob *lj;
962  struct kaioinfo *ki;
963  struct aiocblist *scb, *scbn;
964  int lj_done;
965 
966  ki = userp->p_aioinfo;
967  AIO_LOCK_ASSERT(ki, MA_OWNED);
968  lj = aiocbe->lio;
969  lj_done = 0;
970  if (lj) {
971  lj->lioj_finished_count++;
972  if (lj->lioj_count == lj->lioj_finished_count)
973  lj_done = 1;
974  }
975  if (type == DONE_QUEUE) {
976  aiocbe->jobflags |= AIOCBLIST_DONE;
977  } else {
978  aiocbe->jobflags |= AIOCBLIST_BUFDONE;
979  }
980  TAILQ_INSERT_TAIL(&ki->kaio_done, aiocbe, plist);
981  aiocbe->jobstate = JOBST_JOBFINISHED;
982 
983  if (ki->kaio_flags & KAIO_RUNDOWN)
984  goto notification_done;
985 
986  if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
987  aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID)
988  aio_sendsig(userp, &aiocbe->uaiocb.aio_sigevent, &aiocbe->ksi);
989 
990  KNOTE_LOCKED(&aiocbe->klist, 1);
991 
992  if (lj_done) {
993  if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
995  KNOTE_LOCKED(&lj->klist, 1);
996  }
998  == LIOJ_SIGNAL
999  && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
1000  lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
1001  aio_sendsig(userp, &lj->lioj_signal, &lj->lioj_ksi);
1003  }
1004  }
1005 
1006 notification_done:
1007  if (aiocbe->jobflags & AIOCBLIST_CHECKSYNC) {
1008  TAILQ_FOREACH_SAFE(scb, &ki->kaio_syncqueue, list, scbn) {
1009  if (aiocbe->fd_file == scb->fd_file &&
1010  aiocbe->seqno < scb->seqno) {
1011  if (--scb->pending == 0) {
1012  mtx_lock(&aio_job_mtx);
1013  scb->jobstate = JOBST_JOBQGLOBAL;
1014  TAILQ_REMOVE(&ki->kaio_syncqueue, scb, list);
1015  TAILQ_INSERT_TAIL(&aio_jobs, scb, list);
1016  aio_kick_nowait(userp);
1017  mtx_unlock(&aio_job_mtx);
1018  }
1019  }
1020  }
1021  }
1022  if (ki->kaio_flags & KAIO_WAKEUP) {
1023  ki->kaio_flags &= ~KAIO_WAKEUP;
1024  wakeup(&userp->p_aioinfo);
1025  }
1026 }
1027 
1028 /*
1029  * The AIO daemon, most of the actual work is done in aio_process,
1030  * but the setup (and address space mgmt) is done in this routine.
1031  */
1032 static void
1033 aio_daemon(void *_id)
1034 {
1035  struct aiocblist *aiocbe;
1036  struct aiothreadlist *aiop;
1037  struct kaioinfo *ki;
1038  struct proc *curcp, *mycp, *userp;
1039  struct vmspace *myvm, *tmpvm;
1040  struct thread *td = curthread;
1041  int id = (intptr_t)_id;
1042 
1043  /*
1044  * Local copies of curproc (cp) and vmspace (myvm)
1045  */
1046  mycp = td->td_proc;
1047  myvm = mycp->p_vmspace;
1048 
1049  KASSERT(mycp->p_textvp == NULL, ("kthread has a textvp"));
1050 
1051  /*
1052  * Allocate and ready the aio control info. There is one aiop structure
1053  * per daemon.
1054  */
1055  aiop = uma_zalloc(aiop_zone, M_WAITOK);
1056  aiop->aiothread = td;
1057  aiop->aiothreadflags = 0;
1058 
1059  /* The daemon resides in its own pgrp. */
1060  sys_setsid(td, NULL);
1061 
1062  /*
1063  * Wakeup parent process. (Parent sleeps to keep from blasting away
1064  * and creating too many daemons.)
1065  */
1066  sema_post(&aio_newproc_sem);
1067 
1068  mtx_lock(&aio_job_mtx);
1069  for (;;) {
1070  /*
1071  * curcp is the current daemon process context.
1072  * userp is the current user process context.
1073  */
1074  curcp = mycp;
1075 
1076  /*
1077  * Take daemon off of free queue
1078  */
1079  if (aiop->aiothreadflags & AIOP_FREE) {
1080  TAILQ_REMOVE(&aio_freeproc, aiop, list);
1081  aiop->aiothreadflags &= ~AIOP_FREE;
1082  }
1083 
1084  /*
1085  * Check for jobs.
1086  */
1087  while ((aiocbe = aio_selectjob(aiop)) != NULL) {
1088  mtx_unlock(&aio_job_mtx);
1089  userp = aiocbe->userproc;
1090 
1091  /*
1092  * Connect to process address space for user program.
1093  */
1094  if (userp != curcp) {
1095  /*
1096  * Save the current address space that we are
1097  * connected to.
1098  */
1099  tmpvm = mycp->p_vmspace;
1100 
1101  /*
1102  * Point to the new user address space, and
1103  * refer to it.
1104  */
1105  mycp->p_vmspace = userp->p_vmspace;
1106  atomic_add_int(&mycp->p_vmspace->vm_refcnt, 1);
1107 
1108  /* Activate the new mapping. */
1109  pmap_activate(FIRST_THREAD_IN_PROC(mycp));
1110 
1111  /*
1112  * If the old address space wasn't the daemons
1113  * own address space, then we need to remove the
1114  * daemon's reference from the other process
1115  * that it was acting on behalf of.
1116  */
1117  if (tmpvm != myvm) {
1118  vmspace_free(tmpvm);
1119  }
1120  curcp = userp;
1121  }
1122 
1123  ki = userp->p_aioinfo;
1124 
1125  /* Do the I/O function. */
1126  aio_process(aiocbe);
1127 
1128  mtx_lock(&aio_job_mtx);
1129  /* Decrement the active job count. */
1130  ki->kaio_active_count--;
1131  mtx_unlock(&aio_job_mtx);
1132 
1133  AIO_LOCK(ki);
1134  TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
1135  aio_bio_done_notify(userp, aiocbe, DONE_QUEUE);
1136  AIO_UNLOCK(ki);
1137 
1138  mtx_lock(&aio_job_mtx);
1139  }
1140 
1141  /*
1142  * Disconnect from user address space.
1143  */
1144  if (curcp != mycp) {
1145 
1146  mtx_unlock(&aio_job_mtx);
1147 
1148  /* Get the user address space to disconnect from. */
1149  tmpvm = mycp->p_vmspace;
1150 
1151  /* Get original address space for daemon. */
1152  mycp->p_vmspace = myvm;
1153 
1154  /* Activate the daemon's address space. */
1155  pmap_activate(FIRST_THREAD_IN_PROC(mycp));
1156 #ifdef DIAGNOSTIC
1157  if (tmpvm == myvm) {
1158  printf("AIOD: vmspace problem -- %d\n",
1159  mycp->p_pid);
1160  }
1161 #endif
1162  /* Remove our vmspace reference. */
1163  vmspace_free(tmpvm);
1164 
1165  curcp = mycp;
1166 
1167  mtx_lock(&aio_job_mtx);
1168  /*
1169  * We have to restart to avoid race, we only sleep if
1170  * no job can be selected, that should be
1171  * curcp == mycp.
1172  */
1173  continue;
1174  }
1175 
1176  mtx_assert(&aio_job_mtx, MA_OWNED);
1177 
1178  TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
1179  aiop->aiothreadflags |= AIOP_FREE;
1180 
1181  /*
1182  * If daemon is inactive for a long time, allow it to exit,
1183  * thereby freeing resources.
1184  */
1185  if (msleep(aiop->aiothread, &aio_job_mtx, PRIBIO, "aiordy",
1186  aiod_lifetime)) {
1187  if (TAILQ_EMPTY(&aio_jobs)) {
1188  if ((aiop->aiothreadflags & AIOP_FREE) &&
1190  TAILQ_REMOVE(&aio_freeproc, aiop, list);
1191  num_aio_procs--;
1192  mtx_unlock(&aio_job_mtx);
1193  uma_zfree(aiop_zone, aiop);
1194  free_unr(aiod_unr, id);
1195 #ifdef DIAGNOSTIC
1196  if (mycp->p_vmspace->vm_refcnt <= 1) {
1197  printf("AIOD: bad vm refcnt for"
1198  " exiting daemon: %d\n",
1199  mycp->p_vmspace->vm_refcnt);
1200  }
1201 #endif
1202  kproc_exit(0);
1203  }
1204  }
1205  }
1206  }
1207  mtx_unlock(&aio_job_mtx);
1208  panic("shouldn't be here\n");
1209 }
1210 
1211 /*
1212  * Create a new AIO daemon. This is mostly a kernel-thread fork routine. The
1213  * AIO daemon modifies its environment itself.
1214  */
1215 static int
1217 {
1218  int error;
1219  struct proc *p;
1220  int id;
1221 
1222  id = alloc_unr(aiod_unr);
1223  error = kproc_create(aio_daemon, (void *)(intptr_t)id, &p,
1224  RFNOWAIT, 0, "aiod%d", id);
1225  if (error == 0) {
1226  /*
1227  * Wait until daemon is started.
1228  */
1229  sema_wait(&aio_newproc_sem);
1230  mtx_lock(&aio_job_mtx);
1231  num_aio_procs++;
1232  if (start != NULL)
1233  (*start)--;
1234  mtx_unlock(&aio_job_mtx);
1235  } else {
1236  free_unr(aiod_unr, id);
1237  }
1238  return (error);
1239 }
1240 
1241 /*
1242  * Try the high-performance, low-overhead physio method for eligible
1243  * VCHR devices. This method doesn't use an aio helper thread, and
1244  * thus has very low overhead.
1245  *
1246  * Assumes that the caller, aio_aqueue(), has incremented the file
1247  * structure's reference count, preventing its deallocation for the
1248  * duration of this call.
1249  */
1250 static int
1251 aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
1252 {
1253  struct aiocb *cb;
1254  struct file *fp;
1255  struct buf *bp;
1256  struct vnode *vp;
1257  struct cdevsw *csw;
1258  struct cdev *dev;
1259  struct kaioinfo *ki;
1260  struct aioliojob *lj;
1261  int error, ref;
1262 
1263  cb = &aiocbe->uaiocb;
1264  fp = aiocbe->fd_file;
1265 
1266  if (fp->f_type != DTYPE_VNODE)
1267  return (-1);
1268 
1269  vp = fp->f_vnode;
1270 
1271  /*
1272  * If its not a disk, we don't want to return a positive error.
1273  * It causes the aio code to not fall through to try the thread
1274  * way when you're talking to a regular file.
1275  */
1276  if (!vn_isdisk(vp, &error)) {
1277  if (error == ENOTBLK)
1278  return (-1);
1279  else
1280  return (error);
1281  }
1282 
1283  if (vp->v_bufobj.bo_bsize == 0)
1284  return (-1);
1285 
1286  if (cb->aio_nbytes % vp->v_bufobj.bo_bsize)
1287  return (-1);
1288 
1289  if (cb->aio_nbytes >
1290  MAXPHYS - (((vm_offset_t) cb->aio_buf) & PAGE_MASK))
1291  return (-1);
1292 
1293  ki = p->p_aioinfo;
1294  if (ki->kaio_buffer_count >= ki->kaio_ballowed_count)
1295  return (-1);
1296 
1297  ref = 0;
1298  csw = devvn_refthread(vp, &dev, &ref);
1299  if (csw == NULL)
1300  return (ENXIO);
1301  if (cb->aio_nbytes > dev->si_iosize_max) {
1302  error = -1;
1303  goto unref;
1304  }
1305 
1306  /* Create and build a buffer header for a transfer. */
1307  bp = (struct buf *)getpbuf(NULL);
1308  BUF_KERNPROC(bp);
1309 
1310  AIO_LOCK(ki);
1311  ki->kaio_count++;
1312  ki->kaio_buffer_count++;
1313  lj = aiocbe->lio;
1314  if (lj)
1315  lj->lioj_count++;
1316  AIO_UNLOCK(ki);
1317 
1318  /*
1319  * Get a copy of the kva from the physical buffer.
1320  */
1321  error = 0;
1322 
1323  bp->b_bcount = cb->aio_nbytes;
1324  bp->b_bufsize = cb->aio_nbytes;
1325  bp->b_iodone = aio_physwakeup;
1326  bp->b_saveaddr = bp->b_data;
1327  bp->b_data = (void *)(uintptr_t)cb->aio_buf;
1328  bp->b_offset = cb->aio_offset;
1329  bp->b_iooffset = cb->aio_offset;
1330  bp->b_blkno = btodb(cb->aio_offset);
1331  bp->b_iocmd = cb->aio_lio_opcode == LIO_WRITE ? BIO_WRITE : BIO_READ;
1332 
1333  /*
1334  * Bring buffer into kernel space.
1335  */
1336  if (vmapbuf(bp, (dev->si_flags & SI_UNMAPPED) == 0) < 0) {
1337  error = EFAULT;
1338  goto doerror;
1339  }
1340 
1341  AIO_LOCK(ki);
1342  aiocbe->bp = bp;
1343  bp->b_caller1 = (void *)aiocbe;
1344  TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist);
1345  TAILQ_INSERT_TAIL(&ki->kaio_all, aiocbe, allist);
1346  aiocbe->jobstate = JOBST_JOBQBUF;
1347  cb->_aiocb_private.status = cb->aio_nbytes;
1348  AIO_UNLOCK(ki);
1349 
1350  atomic_add_int(&num_queue_count, 1);
1351  atomic_add_int(&num_buf_aio, 1);
1352 
1353  bp->b_error = 0;
1354 
1355  TASK_INIT(&aiocbe->biotask, 0, biohelper, aiocbe);
1356 
1357  /* Perform transfer. */
1358  dev_strategy_csw(dev, csw, bp);
1359  dev_relthread(dev, ref);
1360  return (0);
1361 
1362 doerror:
1363  AIO_LOCK(ki);
1364  ki->kaio_count--;
1365  ki->kaio_buffer_count--;
1366  if (lj)
1367  lj->lioj_count--;
1368  aiocbe->bp = NULL;
1369  AIO_UNLOCK(ki);
1370  relpbuf(bp, NULL);
1371 unref:
1372  dev_relthread(dev, ref);
1373  return (error);
1374 }
1375 
1376 /*
1377  * Wake up aio requests that may be serviceable now.
1378  */
1379 static void
1380 aio_swake_cb(struct socket *so, struct sockbuf *sb)
1381 {
1382  struct aiocblist *cb, *cbn;
1383  int opcode;
1384 
1385  SOCKBUF_LOCK_ASSERT(sb);
1386  if (sb == &so->so_snd)
1387  opcode = LIO_WRITE;
1388  else
1389  opcode = LIO_READ;
1390 
1391  sb->sb_flags &= ~SB_AIO;
1392  mtx_lock(&aio_job_mtx);
1393  TAILQ_FOREACH_SAFE(cb, &so->so_aiojobq, list, cbn) {
1394  if (opcode == cb->uaiocb.aio_lio_opcode) {
1395  if (cb->jobstate != JOBST_JOBQSOCK)
1396  panic("invalid queue value");
1397  /* XXX
1398  * We don't have actual sockets backend yet,
1399  * so we simply move the requests to the generic
1400  * file I/O backend.
1401  */
1402  TAILQ_REMOVE(&so->so_aiojobq, cb, list);
1403  TAILQ_INSERT_TAIL(&aio_jobs, cb, list);
1404  aio_kick_nowait(cb->userproc);
1405  }
1406  }
1407  mtx_unlock(&aio_job_mtx);
1408 }
1409 
1410 static int
1411 convert_old_sigevent(struct osigevent *osig, struct sigevent *nsig)
1412 {
1413 
1414  /*
1415  * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are
1416  * supported by AIO with the old sigevent structure.
1417  */
1418  nsig->sigev_notify = osig->sigev_notify;
1419  switch (nsig->sigev_notify) {
1420  case SIGEV_NONE:
1421  break;
1422  case SIGEV_SIGNAL:
1423  nsig->sigev_signo = osig->__sigev_u.__sigev_signo;
1424  break;
1425  case SIGEV_KEVENT:
1426  nsig->sigev_notify_kqueue =
1427  osig->__sigev_u.__sigev_notify_kqueue;
1428  nsig->sigev_value.sival_ptr = osig->sigev_value.sival_ptr;
1429  break;
1430  default:
1431  return (EINVAL);
1432  }
1433  return (0);
1434 }
1435 
1436 static int
1437 aiocb_copyin_old_sigevent(struct aiocb *ujob, struct aiocb *kjob)
1438 {
1439  struct oaiocb *ojob;
1440  int error;
1441 
1442  bzero(kjob, sizeof(struct aiocb));
1443  error = copyin(ujob, kjob, sizeof(struct oaiocb));
1444  if (error)
1445  return (error);
1446  ojob = (struct oaiocb *)kjob;
1447  return (convert_old_sigevent(&ojob->aio_sigevent, &kjob->aio_sigevent));
1448 }
1449 
1450 static int
1451 aiocb_copyin(struct aiocb *ujob, struct aiocb *kjob)
1452 {
1453 
1454  return (copyin(ujob, kjob, sizeof(struct aiocb)));
1455 }
1456 
1457 static long
1458 aiocb_fetch_status(struct aiocb *ujob)
1459 {
1460 
1461  return (fuword(&ujob->_aiocb_private.status));
1462 }
1463 
1464 static long
1465 aiocb_fetch_error(struct aiocb *ujob)
1466 {
1467 
1468  return (fuword(&ujob->_aiocb_private.error));
1469 }
1470 
1471 static int
1472 aiocb_store_status(struct aiocb *ujob, long status)
1473 {
1474 
1475  return (suword(&ujob->_aiocb_private.status, status));
1476 }
1477 
1478 static int
1479 aiocb_store_error(struct aiocb *ujob, long error)
1480 {
1481 
1482  return (suword(&ujob->_aiocb_private.error, error));
1483 }
1484 
1485 static int
1486 aiocb_store_kernelinfo(struct aiocb *ujob, long jobref)
1487 {
1488 
1489  return (suword(&ujob->_aiocb_private.kernelinfo, jobref));
1490 }
1491 
1492 static int
1493 aiocb_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob)
1494 {
1495 
1496  return (suword(ujobp, (long)ujob));
1497 }
1498 
1499 static struct aiocb_ops aiocb_ops = {
1500  .copyin = aiocb_copyin,
1501  .fetch_status = aiocb_fetch_status,
1502  .fetch_error = aiocb_fetch_error,
1503  .store_status = aiocb_store_status,
1504  .store_error = aiocb_store_error,
1505  .store_kernelinfo = aiocb_store_kernelinfo,
1506  .store_aiocb = aiocb_store_aiocb,
1507 };
1508 
1509 static struct aiocb_ops aiocb_ops_osigevent = {
1511  .fetch_status = aiocb_fetch_status,
1512  .fetch_error = aiocb_fetch_error,
1513  .store_status = aiocb_store_status,
1514  .store_error = aiocb_store_error,
1515  .store_kernelinfo = aiocb_store_kernelinfo,
1516  .store_aiocb = aiocb_store_aiocb,
1517 };
1518 
1519 /*
1520  * Queue a new AIO request. Choosing either the threaded or direct physio VCHR
1521  * technique is done in this code.
1522  */
1523 int
1524 aio_aqueue(struct thread *td, struct aiocb *job, struct aioliojob *lj,
1525  int type, struct aiocb_ops *ops)
1526 {
1527  struct proc *p = td->td_proc;
1528  struct file *fp;
1529  struct socket *so;
1530  struct aiocblist *aiocbe, *cb;
1531  struct kaioinfo *ki;
1532  struct kevent kev;
1533  struct sockbuf *sb;
1534  int opcode;
1535  int error;
1536  int fd, kqfd;
1537  int jid;
1538  u_short evflags;
1539 
1540  if (p->p_aioinfo == NULL)
1541  aio_init_aioinfo(p);
1542 
1543  ki = p->p_aioinfo;
1544 
1545  ops->store_status(job, -1);
1546  ops->store_error(job, 0);
1547  ops->store_kernelinfo(job, -1);
1548 
1550  ki->kaio_count >= ki->kaio_qallowed_count) {
1551  ops->store_error(job, EAGAIN);
1552  return (EAGAIN);
1553  }
1554 
1555  aiocbe = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO);
1556  aiocbe->inputcharge = 0;
1557  aiocbe->outputcharge = 0;
1558  knlist_init_mtx(&aiocbe->klist, AIO_MTX(ki));
1559 
1560  error = ops->copyin(job, &aiocbe->uaiocb);
1561  if (error) {
1562  ops->store_error(job, error);
1563  uma_zfree(aiocb_zone, aiocbe);
1564  return (error);
1565  }
1566 
1567  if (aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT &&
1568  aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL &&
1569  aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID &&
1570  aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_NONE) {
1571  ops->store_error(job, EINVAL);
1572  uma_zfree(aiocb_zone, aiocbe);
1573  return (EINVAL);
1574  }
1575 
1576  if ((aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
1577  aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) &&
1578  !_SIG_VALID(aiocbe->uaiocb.aio_sigevent.sigev_signo)) {
1579  uma_zfree(aiocb_zone, aiocbe);
1580  return (EINVAL);
1581  }
1582 
1583  ksiginfo_init(&aiocbe->ksi);
1584 
1585  /* Save userspace address of the job info. */
1586  aiocbe->uuaiocb = job;
1587 
1588  /* Get the opcode. */
1589  if (type != LIO_NOP)
1590  aiocbe->uaiocb.aio_lio_opcode = type;
1591  opcode = aiocbe->uaiocb.aio_lio_opcode;
1592 
1593  /*
1594  * Validate the opcode and fetch the file object for the specified
1595  * file descriptor.
1596  *
1597  * XXXRW: Moved the opcode validation up here so that we don't
1598  * retrieve a file descriptor without knowing what the capabiltity
1599  * should be.
1600  */
1601  fd = aiocbe->uaiocb.aio_fildes;
1602  switch (opcode) {
1603  case LIO_WRITE:
1604  error = fget_write(td, fd, CAP_WRITE | CAP_SEEK, &fp);
1605  break;
1606  case LIO_READ:
1607  error = fget_read(td, fd, CAP_READ | CAP_SEEK, &fp);
1608  break;
1609  case LIO_SYNC:
1610  error = fget(td, fd, CAP_FSYNC, &fp);
1611  break;
1612  case LIO_NOP:
1613  error = fget(td, fd, 0, &fp);
1614  break;
1615  default:
1616  error = EINVAL;
1617  }
1618  if (error) {
1619  uma_zfree(aiocb_zone, aiocbe);
1620  ops->store_error(job, error);
1621  return (error);
1622  }
1623 
1624  if (opcode == LIO_SYNC && fp->f_vnode == NULL) {
1625  error = EINVAL;
1626  goto aqueue_fail;
1627  }
1628 
1629  if (opcode != LIO_SYNC && aiocbe->uaiocb.aio_offset == -1LL) {
1630  error = EINVAL;
1631  goto aqueue_fail;
1632  }
1633 
1634  aiocbe->fd_file = fp;
1635 
1636  mtx_lock(&aio_job_mtx);
1637  jid = jobrefid++;
1638  aiocbe->seqno = jobseqno++;
1639  mtx_unlock(&aio_job_mtx);
1640  error = ops->store_kernelinfo(job, jid);
1641  if (error) {
1642  error = EINVAL;
1643  goto aqueue_fail;
1644  }
1645  aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jid;
1646 
1647  if (opcode == LIO_NOP) {
1648  fdrop(fp, td);
1649  uma_zfree(aiocb_zone, aiocbe);
1650  return (0);
1651  }
1652 
1653  if (aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT)
1654  goto no_kqueue;
1655  evflags = aiocbe->uaiocb.aio_sigevent.sigev_notify_kevent_flags;
1656  if ((evflags & ~(EV_CLEAR | EV_DISPATCH | EV_ONESHOT)) != 0) {
1657  error = EINVAL;
1658  goto aqueue_fail;
1659  }
1660  kqfd = aiocbe->uaiocb.aio_sigevent.sigev_notify_kqueue;
1661  kev.ident = (uintptr_t)aiocbe->uuaiocb;
1662  kev.filter = EVFILT_AIO;
1663  kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1 | evflags;
1664  kev.data = (intptr_t)aiocbe;
1665  kev.udata = aiocbe->uaiocb.aio_sigevent.sigev_value.sival_ptr;
1666  error = kqfd_register(kqfd, &kev, td, 1);
1667 aqueue_fail:
1668  if (error) {
1669  fdrop(fp, td);
1670  uma_zfree(aiocb_zone, aiocbe);
1671  ops->store_error(job, error);
1672  goto done;
1673  }
1674 no_kqueue:
1675 
1676  ops->store_error(job, EINPROGRESS);
1677  aiocbe->uaiocb._aiocb_private.error = EINPROGRESS;
1678  aiocbe->userproc = p;
1679  aiocbe->cred = crhold(td->td_ucred);
1680  aiocbe->jobflags = 0;
1681  aiocbe->lio = lj;
1682 
1683  if (opcode == LIO_SYNC)
1684  goto queueit;
1685 
1686  if (fp->f_type == DTYPE_SOCKET) {
1687  /*
1688  * Alternate queueing for socket ops: Reach down into the
1689  * descriptor to get the socket data. Then check to see if the
1690  * socket is ready to be read or written (based on the requested
1691  * operation).
1692  *
1693  * If it is not ready for io, then queue the aiocbe on the
1694  * socket, and set the flags so we get a call when sbnotify()
1695  * happens.
1696  *
1697  * Note if opcode is neither LIO_WRITE nor LIO_READ we lock
1698  * and unlock the snd sockbuf for no reason.
1699  */
1700  so = fp->f_data;
1701  sb = (opcode == LIO_READ) ? &so->so_rcv : &so->so_snd;
1702  SOCKBUF_LOCK(sb);
1703  if (((opcode == LIO_READ) && (!soreadable(so))) || ((opcode ==
1704  LIO_WRITE) && (!sowriteable(so)))) {
1705  sb->sb_flags |= SB_AIO;
1706 
1707  mtx_lock(&aio_job_mtx);
1708  TAILQ_INSERT_TAIL(&so->so_aiojobq, aiocbe, list);
1709  mtx_unlock(&aio_job_mtx);
1710 
1711  AIO_LOCK(ki);
1712  TAILQ_INSERT_TAIL(&ki->kaio_all, aiocbe, allist);
1713  TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
1714  aiocbe->jobstate = JOBST_JOBQSOCK;
1715  ki->kaio_count++;
1716  if (lj)
1717  lj->lioj_count++;
1718  AIO_UNLOCK(ki);
1719  SOCKBUF_UNLOCK(sb);
1720  atomic_add_int(&num_queue_count, 1);
1721  error = 0;
1722  goto done;
1723  }
1724  SOCKBUF_UNLOCK(sb);
1725  }
1726 
1727  if ((error = aio_qphysio(p, aiocbe)) == 0)
1728  goto done;
1729 #if 0
1730  if (error > 0) {
1731  aiocbe->uaiocb._aiocb_private.error = error;
1732  ops->store_error(job, error);
1733  goto done;
1734  }
1735 #endif
1736 queueit:
1737  /* No buffer for daemon I/O. */
1738  aiocbe->bp = NULL;
1739  atomic_add_int(&num_queue_count, 1);
1740 
1741  AIO_LOCK(ki);
1742  ki->kaio_count++;
1743  if (lj)
1744  lj->lioj_count++;
1745  TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
1746  TAILQ_INSERT_TAIL(&ki->kaio_all, aiocbe, allist);
1747  if (opcode == LIO_SYNC) {
1748  TAILQ_FOREACH(cb, &ki->kaio_jobqueue, plist) {
1749  if (cb->fd_file == aiocbe->fd_file &&
1750  cb->uaiocb.aio_lio_opcode != LIO_SYNC &&
1751  cb->seqno < aiocbe->seqno) {
1752  cb->jobflags |= AIOCBLIST_CHECKSYNC;
1753  aiocbe->pending++;
1754  }
1755  }
1756  TAILQ_FOREACH(cb, &ki->kaio_bufqueue, plist) {
1757  if (cb->fd_file == aiocbe->fd_file &&
1758  cb->uaiocb.aio_lio_opcode != LIO_SYNC &&
1759  cb->seqno < aiocbe->seqno) {
1760  cb->jobflags |= AIOCBLIST_CHECKSYNC;
1761  aiocbe->pending++;
1762  }
1763  }
1764  if (aiocbe->pending != 0) {
1765  TAILQ_INSERT_TAIL(&ki->kaio_syncqueue, aiocbe, list);
1766  aiocbe->jobstate = JOBST_JOBQSYNC;
1767  AIO_UNLOCK(ki);
1768  goto done;
1769  }
1770  }
1771  mtx_lock(&aio_job_mtx);
1772  TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list);
1773  aiocbe->jobstate = JOBST_JOBQGLOBAL;
1774  aio_kick_nowait(p);
1775  mtx_unlock(&aio_job_mtx);
1776  AIO_UNLOCK(ki);
1777  error = 0;
1778 done:
1779  return (error);
1780 }
1781 
1782 static void
1783 aio_kick_nowait(struct proc *userp)
1784 {
1785  struct kaioinfo *ki = userp->p_aioinfo;
1786  struct aiothreadlist *aiop;
1787 
1788  mtx_assert(&aio_job_mtx, MA_OWNED);
1789  if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
1790  TAILQ_REMOVE(&aio_freeproc, aiop, list);
1791  aiop->aiothreadflags &= ~AIOP_FREE;
1792  wakeup(aiop->aiothread);
1793  } else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
1795  ki->kaio_maxactive_count)) {
1796  taskqueue_enqueue(taskqueue_aiod_bio, &ki->kaio_task);
1797  }
1798 }
1799 
1800 static int
1801 aio_kick(struct proc *userp)
1802 {
1803  struct kaioinfo *ki = userp->p_aioinfo;
1804  struct aiothreadlist *aiop;
1805  int error, ret = 0;
1806 
1807  mtx_assert(&aio_job_mtx, MA_OWNED);
1808 retryproc:
1809  if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
1810  TAILQ_REMOVE(&aio_freeproc, aiop, list);
1811  aiop->aiothreadflags &= ~AIOP_FREE;
1812  wakeup(aiop->aiothread);
1813  } else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
1815  ki->kaio_maxactive_count)) {
1817  mtx_unlock(&aio_job_mtx);
1818  error = aio_newproc(&num_aio_resv_start);
1819  mtx_lock(&aio_job_mtx);
1820  if (error) {
1822  goto retryproc;
1823  }
1824  } else {
1825  ret = -1;
1826  }
1827  return (ret);
1828 }
1829 
1830 static void
1831 aio_kick_helper(void *context, int pending)
1832 {
1833  struct proc *userp = context;
1834 
1835  mtx_lock(&aio_job_mtx);
1836  while (--pending >= 0) {
1837  if (aio_kick(userp))
1838  break;
1839  }
1840  mtx_unlock(&aio_job_mtx);
1841 }
1842 
1843 /*
1844  * Support the aio_return system call, as a side-effect, kernel resources are
1845  * released.
1846  */
1847 static int
1848 kern_aio_return(struct thread *td, struct aiocb *uaiocb, struct aiocb_ops *ops)
1849 {
1850  struct proc *p = td->td_proc;
1851  struct aiocblist *cb;
1852  struct kaioinfo *ki;
1853  int status, error;
1854 
1855  ki = p->p_aioinfo;
1856  if (ki == NULL)
1857  return (EINVAL);
1858  AIO_LOCK(ki);
1859  TAILQ_FOREACH(cb, &ki->kaio_done, plist) {
1860  if (cb->uuaiocb == uaiocb)
1861  break;
1862  }
1863  if (cb != NULL) {
1864  MPASS(cb->jobstate == JOBST_JOBFINISHED);
1865  status = cb->uaiocb._aiocb_private.status;
1866  error = cb->uaiocb._aiocb_private.error;
1867  td->td_retval[0] = status;
1868  if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
1869  td->td_ru.ru_oublock += cb->outputcharge;
1870  cb->outputcharge = 0;
1871  } else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
1872  td->td_ru.ru_inblock += cb->inputcharge;
1873  cb->inputcharge = 0;
1874  }
1875  aio_free_entry(cb);
1876  AIO_UNLOCK(ki);
1877  ops->store_error(uaiocb, error);
1878  ops->store_status(uaiocb, status);
1879  } else {
1880  error = EINVAL;
1881  AIO_UNLOCK(ki);
1882  }
1883  return (error);
1884 }
1885 
1886 int
1887 sys_aio_return(struct thread *td, struct aio_return_args *uap)
1888 {
1889 
1890  return (kern_aio_return(td, uap->aiocbp, &aiocb_ops));
1891 }
1892 
1893 /*
1894  * Allow a process to wakeup when any of the I/O requests are completed.
1895  */
1896 static int
1897 kern_aio_suspend(struct thread *td, int njoblist, struct aiocb **ujoblist,
1898  struct timespec *ts)
1899 {
1900  struct proc *p = td->td_proc;
1901  struct timeval atv;
1902  struct kaioinfo *ki;
1903  struct aiocblist *cb, *cbfirst;
1904  int error, i, timo;
1905 
1906  timo = 0;
1907  if (ts) {
1908  if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
1909  return (EINVAL);
1910 
1911  TIMESPEC_TO_TIMEVAL(&atv, ts);
1912  if (itimerfix(&atv))
1913  return (EINVAL);
1914  timo = tvtohz(&atv);
1915  }
1916 
1917  ki = p->p_aioinfo;
1918  if (ki == NULL)
1919  return (EAGAIN);
1920 
1921  if (njoblist == 0)
1922  return (0);
1923 
1924  AIO_LOCK(ki);
1925  for (;;) {
1926  cbfirst = NULL;
1927  error = 0;
1928  TAILQ_FOREACH(cb, &ki->kaio_all, allist) {
1929  for (i = 0; i < njoblist; i++) {
1930  if (cb->uuaiocb == ujoblist[i]) {
1931  if (cbfirst == NULL)
1932  cbfirst = cb;
1933  if (cb->jobstate == JOBST_JOBFINISHED)
1934  goto RETURN;
1935  }
1936  }
1937  }
1938  /* All tasks were finished. */
1939  if (cbfirst == NULL)
1940  break;
1941 
1942  ki->kaio_flags |= KAIO_WAKEUP;
1943  error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
1944  "aiospn", timo);
1945  if (error == ERESTART)
1946  error = EINTR;
1947  if (error)
1948  break;
1949  }
1950 RETURN:
1951  AIO_UNLOCK(ki);
1952  return (error);
1953 }
1954 
1955 int
1956 sys_aio_suspend(struct thread *td, struct aio_suspend_args *uap)
1957 {
1958  struct timespec ts, *tsp;
1959  struct aiocb **ujoblist;
1960  int error;
1961 
1962  if (uap->nent < 0 || uap->nent > AIO_LISTIO_MAX)
1963  return (EINVAL);
1964 
1965  if (uap->timeout) {
1966  /* Get timespec struct. */
1967  if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
1968  return (error);
1969  tsp = &ts;
1970  } else
1971  tsp = NULL;
1972 
1973  ujoblist = uma_zalloc(aiol_zone, M_WAITOK);
1974  error = copyin(uap->aiocbp, ujoblist, uap->nent * sizeof(ujoblist[0]));
1975  if (error == 0)
1976  error = kern_aio_suspend(td, uap->nent, ujoblist, tsp);
1977  uma_zfree(aiol_zone, ujoblist);
1978  return (error);
1979 }
1980 
1981 /*
1982  * aio_cancel cancels any non-physio aio operations not currently in
1983  * progress.
1984  */
1985 int
1986 sys_aio_cancel(struct thread *td, struct aio_cancel_args *uap)
1987 {
1988  struct proc *p = td->td_proc;
1989  struct kaioinfo *ki;
1990  struct aiocblist *cbe, *cbn;
1991  struct file *fp;
1992  struct socket *so;
1993  int error;
1994  int remove;
1995  int cancelled = 0;
1996  int notcancelled = 0;
1997  struct vnode *vp;
1998 
1999  /* Lookup file object. */
2000  error = fget(td, uap->fd, 0, &fp);
2001  if (error)
2002  return (error);
2003 
2004  ki = p->p_aioinfo;
2005  if (ki == NULL)
2006  goto done;
2007 
2008  if (fp->f_type == DTYPE_VNODE) {
2009  vp = fp->f_vnode;
2010  if (vn_isdisk(vp, &error)) {
2011  fdrop(fp, td);
2012  td->td_retval[0] = AIO_NOTCANCELED;
2013  return (0);
2014  }
2015  }
2016 
2017  AIO_LOCK(ki);
2018  TAILQ_FOREACH_SAFE(cbe, &ki->kaio_jobqueue, plist, cbn) {
2019  if ((uap->fd == cbe->uaiocb.aio_fildes) &&
2020  ((uap->aiocbp == NULL) ||
2021  (uap->aiocbp == cbe->uuaiocb))) {
2022  remove = 0;
2023 
2024  mtx_lock(&aio_job_mtx);
2025  if (cbe->jobstate == JOBST_JOBQGLOBAL) {
2026  TAILQ_REMOVE(&aio_jobs, cbe, list);
2027  remove = 1;
2028  } else if (cbe->jobstate == JOBST_JOBQSOCK) {
2029  MPASS(fp->f_type == DTYPE_SOCKET);
2030  so = fp->f_data;
2031  TAILQ_REMOVE(&so->so_aiojobq, cbe, list);
2032  remove = 1;
2033  } else if (cbe->jobstate == JOBST_JOBQSYNC) {
2034  TAILQ_REMOVE(&ki->kaio_syncqueue, cbe, list);
2035  remove = 1;
2036  }
2037  mtx_unlock(&aio_job_mtx);
2038 
2039  if (remove) {
2040  TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist);
2041  cbe->uaiocb._aiocb_private.status = -1;
2042  cbe->uaiocb._aiocb_private.error = ECANCELED;
2044  cancelled++;
2045  } else {
2046  notcancelled++;
2047  }
2048  if (uap->aiocbp != NULL)
2049  break;
2050  }
2051  }
2052  AIO_UNLOCK(ki);
2053 
2054 done:
2055  fdrop(fp, td);
2056 
2057  if (uap->aiocbp != NULL) {
2058  if (cancelled) {
2059  td->td_retval[0] = AIO_CANCELED;
2060  return (0);
2061  }
2062  }
2063 
2064  if (notcancelled) {
2065  td->td_retval[0] = AIO_NOTCANCELED;
2066  return (0);
2067  }
2068 
2069  if (cancelled) {
2070  td->td_retval[0] = AIO_CANCELED;
2071  return (0);
2072  }
2073 
2074  td->td_retval[0] = AIO_ALLDONE;
2075 
2076  return (0);
2077 }
2078 
2079 /*
2080  * aio_error is implemented in the kernel level for compatibility purposes
2081  * only. For a user mode async implementation, it would be best to do it in
2082  * a userland subroutine.
2083  */
2084 static int
2085 kern_aio_error(struct thread *td, struct aiocb *aiocbp, struct aiocb_ops *ops)
2086 {
2087  struct proc *p = td->td_proc;
2088  struct aiocblist *cb;
2089  struct kaioinfo *ki;
2090  int status;
2091 
2092  ki = p->p_aioinfo;
2093  if (ki == NULL) {
2094  td->td_retval[0] = EINVAL;
2095  return (0);
2096  }
2097 
2098  AIO_LOCK(ki);
2099  TAILQ_FOREACH(cb, &ki->kaio_all, allist) {
2100  if (cb->uuaiocb == aiocbp) {
2101  if (cb->jobstate == JOBST_JOBFINISHED)
2102  td->td_retval[0] =
2103  cb->uaiocb._aiocb_private.error;
2104  else
2105  td->td_retval[0] = EINPROGRESS;
2106  AIO_UNLOCK(ki);
2107  return (0);
2108  }
2109  }
2110  AIO_UNLOCK(ki);
2111 
2112  /*
2113  * Hack for failure of aio_aqueue.
2114  */
2115  status = ops->fetch_status(aiocbp);
2116  if (status == -1) {
2117  td->td_retval[0] = ops->fetch_error(aiocbp);
2118  return (0);
2119  }
2120 
2121  td->td_retval[0] = EINVAL;
2122  return (0);
2123 }
2124 
2125 int
2126 sys_aio_error(struct thread *td, struct aio_error_args *uap)
2127 {
2128 
2129  return (kern_aio_error(td, uap->aiocbp, &aiocb_ops));
2130 }
2131 
2132 /* syscall - asynchronous read from a file (REALTIME) */
2133 int
2134 sys_oaio_read(struct thread *td, struct oaio_read_args *uap)
2135 {
2136 
2137  return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
2138  &aiocb_ops_osigevent));
2139 }
2140 
2141 int
2142 sys_aio_read(struct thread *td, struct aio_read_args *uap)
2143 {
2144 
2145  return (aio_aqueue(td, uap->aiocbp, NULL, LIO_READ, &aiocb_ops));
2146 }
2147 
2148 /* syscall - asynchronous write to a file (REALTIME) */
2149 int
2150 sys_oaio_write(struct thread *td, struct oaio_write_args *uap)
2151 {
2152 
2153  return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
2154  &aiocb_ops_osigevent));
2155 }
2156 
2157 int
2158 sys_aio_write(struct thread *td, struct aio_write_args *uap)
2159 {
2160 
2161  return (aio_aqueue(td, uap->aiocbp, NULL, LIO_WRITE, &aiocb_ops));
2162 }
2163 
2164 static int
2165 kern_lio_listio(struct thread *td, int mode, struct aiocb * const *uacb_list,
2166  struct aiocb **acb_list, int nent, struct sigevent *sig,
2167  struct aiocb_ops *ops)
2168 {
2169  struct proc *p = td->td_proc;
2170  struct aiocb *iocb;
2171  struct kaioinfo *ki;
2172  struct aioliojob *lj;
2173  struct kevent kev;
2174  int error;
2175  int nerror;
2176  int i;
2177 
2178  if ((mode != LIO_NOWAIT) && (mode != LIO_WAIT))
2179  return (EINVAL);
2180 
2181  if (nent < 0 || nent > AIO_LISTIO_MAX)
2182  return (EINVAL);
2183 
2184  if (p->p_aioinfo == NULL)
2185  aio_init_aioinfo(p);
2186 
2187  ki = p->p_aioinfo;
2188 
2189  lj = uma_zalloc(aiolio_zone, M_WAITOK);
2190  lj->lioj_flags = 0;
2191  lj->lioj_count = 0;
2192  lj->lioj_finished_count = 0;
2193  knlist_init_mtx(&lj->klist, AIO_MTX(ki));
2194  ksiginfo_init(&lj->lioj_ksi);
2195 
2196  /*
2197  * Setup signal.
2198  */
2199  if (sig && (mode == LIO_NOWAIT)) {
2200  bcopy(sig, &lj->lioj_signal, sizeof(lj->lioj_signal));
2201  if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
2202  /* Assume only new style KEVENT */
2203  kev.filter = EVFILT_LIO;
2204  kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
2205  kev.ident = (uintptr_t)uacb_list; /* something unique */
2206  kev.data = (intptr_t)lj;
2207  /* pass user defined sigval data */
2208  kev.udata = lj->lioj_signal.sigev_value.sival_ptr;
2209  error = kqfd_register(
2210  lj->lioj_signal.sigev_notify_kqueue, &kev, td, 1);
2211  if (error) {
2212  uma_zfree(aiolio_zone, lj);
2213  return (error);
2214  }
2215  } else if (lj->lioj_signal.sigev_notify == SIGEV_NONE) {
2216  ;
2217  } else if (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
2218  lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID) {
2219  if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
2220  uma_zfree(aiolio_zone, lj);
2221  return EINVAL;
2222  }
2223  lj->lioj_flags |= LIOJ_SIGNAL;
2224  } else {
2225  uma_zfree(aiolio_zone, lj);
2226  return EINVAL;
2227  }
2228  }
2229 
2230  AIO_LOCK(ki);
2231  TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);
2232  /*
2233  * Add extra aiocb count to avoid the lio to be freed
2234  * by other threads doing aio_waitcomplete or aio_return,
2235  * and prevent event from being sent until we have queued
2236  * all tasks.
2237  */
2238  lj->lioj_count = 1;
2239  AIO_UNLOCK(ki);
2240 
2241  /*
2242  * Get pointers to the list of I/O requests.
2243  */
2244  nerror = 0;
2245  for (i = 0; i < nent; i++) {
2246  iocb = acb_list[i];
2247  if (iocb != NULL) {
2248  error = aio_aqueue(td, iocb, lj, LIO_NOP, ops);
2249  if (error != 0)
2250  nerror++;
2251  }
2252  }
2253 
2254  error = 0;
2255  AIO_LOCK(ki);
2256  if (mode == LIO_WAIT) {
2257  while (lj->lioj_count - 1 != lj->lioj_finished_count) {
2258  ki->kaio_flags |= KAIO_WAKEUP;
2259  error = msleep(&p->p_aioinfo, AIO_MTX(ki),
2260  PRIBIO | PCATCH, "aiospn", 0);
2261  if (error == ERESTART)
2262  error = EINTR;
2263  if (error)
2264  break;
2265  }
2266  } else {
2267  if (lj->lioj_count - 1 == lj->lioj_finished_count) {
2268  if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
2270  KNOTE_LOCKED(&lj->klist, 1);
2271  }
2273  == LIOJ_SIGNAL
2274  && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
2275  lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
2276  aio_sendsig(p, &lj->lioj_signal,
2277  &lj->lioj_ksi);
2279  }
2280  }
2281  }
2282  lj->lioj_count--;
2283  if (lj->lioj_count == 0) {
2284  TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
2285  knlist_delete(&lj->klist, curthread, 1);
2286  PROC_LOCK(p);
2287  sigqueue_take(&lj->lioj_ksi);
2288  PROC_UNLOCK(p);
2289  AIO_UNLOCK(ki);
2290  uma_zfree(aiolio_zone, lj);
2291  } else
2292  AIO_UNLOCK(ki);
2293 
2294  if (nerror)
2295  return (EIO);
2296  return (error);
2297 }
2298 
2299 /* syscall - list directed I/O (REALTIME) */
2300 int
2301 sys_olio_listio(struct thread *td, struct olio_listio_args *uap)
2302 {
2303  struct aiocb **acb_list;
2304  struct sigevent *sigp, sig;
2305  struct osigevent osig;
2306  int error, nent;
2307 
2308  if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
2309  return (EINVAL);
2310 
2311  nent = uap->nent;
2312  if (nent < 0 || nent > AIO_LISTIO_MAX)
2313  return (EINVAL);
2314 
2315  if (uap->sig && (uap->mode == LIO_NOWAIT)) {
2316  error = copyin(uap->sig, &osig, sizeof(osig));
2317  if (error)
2318  return (error);
2319  error = convert_old_sigevent(&osig, &sig);
2320  if (error)
2321  return (error);
2322  sigp = &sig;
2323  } else
2324  sigp = NULL;
2325 
2326  acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
2327  error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0]));
2328  if (error == 0)
2329  error = kern_lio_listio(td, uap->mode,
2330  (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
2331  &aiocb_ops_osigevent);
2332  free(acb_list, M_LIO);
2333  return (error);
2334 }
2335 
2336 /* syscall - list directed I/O (REALTIME) */
2337 int
2338 sys_lio_listio(struct thread *td, struct lio_listio_args *uap)
2339 {
2340  struct aiocb **acb_list;
2341  struct sigevent *sigp, sig;
2342  int error, nent;
2343 
2344  if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
2345  return (EINVAL);
2346 
2347  nent = uap->nent;
2348  if (nent < 0 || nent > AIO_LISTIO_MAX)
2349  return (EINVAL);
2350 
2351  if (uap->sig && (uap->mode == LIO_NOWAIT)) {
2352  error = copyin(uap->sig, &sig, sizeof(sig));
2353  if (error)
2354  return (error);
2355  sigp = &sig;
2356  } else
2357  sigp = NULL;
2358 
2359  acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
2360  error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0]));
2361  if (error == 0)
2362  error = kern_lio_listio(td, uap->mode, uap->acb_list, acb_list,
2363  nent, sigp, &aiocb_ops);
2364  free(acb_list, M_LIO);
2365  return (error);
2366 }
2367 
2368 /*
2369  * Called from interrupt thread for physio, we should return as fast
2370  * as possible, so we schedule a biohelper task.
2371  */
2372 static void
2373 aio_physwakeup(struct buf *bp)
2374 {
2375  struct aiocblist *aiocbe;
2376 
2377  aiocbe = (struct aiocblist *)bp->b_caller1;
2378  taskqueue_enqueue(taskqueue_aiod_bio, &aiocbe->biotask);
2379 }
2380 
2381 /*
2382  * Task routine to perform heavy tasks, process wakeup, and signals.
2383  */
2384 static void
2385 biohelper(void *context, int pending)
2386 {
2387  struct aiocblist *aiocbe = context;
2388  struct buf *bp;
2389  struct proc *userp;
2390  struct kaioinfo *ki;
2391  int nblks;
2392 
2393  bp = aiocbe->bp;
2394  userp = aiocbe->userproc;
2395  ki = userp->p_aioinfo;
2396  AIO_LOCK(ki);
2397  aiocbe->uaiocb._aiocb_private.status -= bp->b_resid;
2398  aiocbe->uaiocb._aiocb_private.error = 0;
2399  if (bp->b_ioflags & BIO_ERROR)
2400  aiocbe->uaiocb._aiocb_private.error = bp->b_error;
2401  nblks = btodb(aiocbe->uaiocb.aio_nbytes);
2402  if (aiocbe->uaiocb.aio_lio_opcode == LIO_WRITE)
2403  aiocbe->outputcharge += nblks;
2404  else
2405  aiocbe->inputcharge += nblks;
2406  aiocbe->bp = NULL;
2407  TAILQ_REMOVE(&userp->p_aioinfo->kaio_bufqueue, aiocbe, plist);
2408  ki->kaio_buffer_count--;
2409  aio_bio_done_notify(userp, aiocbe, DONE_BUF);
2410  AIO_UNLOCK(ki);
2411 
2412  /* Release mapping into kernel space. */
2413  vunmapbuf(bp);
2414  relpbuf(bp, NULL);
2415  atomic_subtract_int(&num_buf_aio, 1);
2416 }
2417 
2418 /* syscall - wait for the next completion of an aio request */
2419 static int
2420 kern_aio_waitcomplete(struct thread *td, struct aiocb **aiocbp,
2421  struct timespec *ts, struct aiocb_ops *ops)
2422 {
2423  struct proc *p = td->td_proc;
2424  struct timeval atv;
2425  struct kaioinfo *ki;
2426  struct aiocblist *cb;
2427  struct aiocb *uuaiocb;
2428  int error, status, timo;
2429 
2430  ops->store_aiocb(aiocbp, NULL);
2431 
2432  timo = 0;
2433  if (ts) {
2434  if ((ts->tv_nsec < 0) || (ts->tv_nsec >= 1000000000))
2435  return (EINVAL);
2436 
2437  TIMESPEC_TO_TIMEVAL(&atv, ts);
2438  if (itimerfix(&atv))
2439  return (EINVAL);
2440  timo = tvtohz(&atv);
2441  }
2442 
2443  if (p->p_aioinfo == NULL)
2444  aio_init_aioinfo(p);
2445  ki = p->p_aioinfo;
2446 
2447  error = 0;
2448  cb = NULL;
2449  AIO_LOCK(ki);
2450  while ((cb = TAILQ_FIRST(&ki->kaio_done)) == NULL) {
2451  ki->kaio_flags |= KAIO_WAKEUP;
2452  error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
2453  "aiowc", timo);
2454  if (timo && error == ERESTART)
2455  error = EINTR;
2456  if (error)
2457  break;
2458  }
2459 
2460  if (cb != NULL) {
2461  MPASS(cb->jobstate == JOBST_JOBFINISHED);
2462  uuaiocb = cb->uuaiocb;
2463  status = cb->uaiocb._aiocb_private.status;
2464  error = cb->uaiocb._aiocb_private.error;
2465  td->td_retval[0] = status;
2466  if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
2467  td->td_ru.ru_oublock += cb->outputcharge;
2468  cb->outputcharge = 0;
2469  } else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
2470  td->td_ru.ru_inblock += cb->inputcharge;
2471  cb->inputcharge = 0;
2472  }
2473  aio_free_entry(cb);
2474  AIO_UNLOCK(ki);
2475  ops->store_aiocb(aiocbp, uuaiocb);
2476  ops->store_error(uuaiocb, error);
2477  ops->store_status(uuaiocb, status);
2478  } else
2479  AIO_UNLOCK(ki);
2480 
2481  return (error);
2482 }
2483 
2484 int
2485 sys_aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap)
2486 {
2487  struct timespec ts, *tsp;
2488  int error;
2489 
2490  if (uap->timeout) {
2491  /* Get timespec struct. */
2492  error = copyin(uap->timeout, &ts, sizeof(ts));
2493  if (error)
2494  return (error);
2495  tsp = &ts;
2496  } else
2497  tsp = NULL;
2498 
2499  return (kern_aio_waitcomplete(td, uap->aiocbp, tsp, &aiocb_ops));
2500 }
2501 
2502 static int
2503 kern_aio_fsync(struct thread *td, int op, struct aiocb *aiocbp,
2504  struct aiocb_ops *ops)
2505 {
2506  struct proc *p = td->td_proc;
2507  struct kaioinfo *ki;
2508 
2509  if (op != O_SYNC) /* XXX lack of O_DSYNC */
2510  return (EINVAL);
2511  ki = p->p_aioinfo;
2512  if (ki == NULL)
2513  aio_init_aioinfo(p);
2514  return (aio_aqueue(td, aiocbp, NULL, LIO_SYNC, ops));
2515 }
2516 
2517 int
2518 sys_aio_fsync(struct thread *td, struct aio_fsync_args *uap)
2519 {
2520 
2521  return (kern_aio_fsync(td, uap->op, uap->aiocbp, &aiocb_ops));
2522 }
2523 
2524 /* kqueue attach function */
2525 static int
2527 {
2528  struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;
2529 
2530  /*
2531  * The aiocbe pointer must be validated before using it, so
2532  * registration is restricted to the kernel; the user cannot
2533  * set EV_FLAG1.
2534  */
2535  if ((kn->kn_flags & EV_FLAG1) == 0)
2536  return (EPERM);
2537  kn->kn_ptr.p_aio = aiocbe;
2538  kn->kn_flags &= ~EV_FLAG1;
2539 
2540  knlist_add(&aiocbe->klist, kn, 0);
2541 
2542  return (0);
2543 }
2544 
2545 /* kqueue detach function */
2546 static void
2548 {
2549  struct knlist *knl;
2550 
2551  knl = &kn->kn_ptr.p_aio->klist;
2552  knl->kl_lock(knl->kl_lockarg);
2553  if (!knlist_empty(knl))
2554  knlist_remove(knl, kn, 1);
2555  knl->kl_unlock(knl->kl_lockarg);
2556 }
2557 
2558 /* kqueue filter function */
2559 /*ARGSUSED*/
2560 static int
2561 filt_aio(struct knote *kn, long hint)
2562 {
2563  struct aiocblist *aiocbe = kn->kn_ptr.p_aio;
2564 
2565  kn->kn_data = aiocbe->uaiocb._aiocb_private.error;
2566  if (aiocbe->jobstate != JOBST_JOBFINISHED)
2567  return (0);
2568  kn->kn_flags |= EV_EOF;
2569  return (1);
2570 }
2571 
2572 /* kqueue attach function */
2573 static int
2575 {
2576  struct aioliojob * lj = (struct aioliojob *)kn->kn_sdata;
2577 
2578  /*
2579  * The aioliojob pointer must be validated before using it, so
2580  * registration is restricted to the kernel; the user cannot
2581  * set EV_FLAG1.
2582  */
2583  if ((kn->kn_flags & EV_FLAG1) == 0)
2584  return (EPERM);
2585  kn->kn_ptr.p_lio = lj;
2586  kn->kn_flags &= ~EV_FLAG1;
2587 
2588  knlist_add(&lj->klist, kn, 0);
2589 
2590  return (0);
2591 }
2592 
2593 /* kqueue detach function */
2594 static void
2596 {
2597  struct knlist *knl;
2598 
2599  knl = &kn->kn_ptr.p_lio->klist;
2600  knl->kl_lock(knl->kl_lockarg);
2601  if (!knlist_empty(knl))
2602  knlist_remove(knl, kn, 1);
2603  knl->kl_unlock(knl->kl_lockarg);
2604 }
2605 
2606 /* kqueue filter function */
2607 /*ARGSUSED*/
2608 static int
2609 filt_lio(struct knote *kn, long hint)
2610 {
2611  struct aioliojob * lj = kn->kn_ptr.p_lio;
2612 
2613  return (lj->lioj_flags & LIOJ_KEVENT_POSTED);
2614 }
2615 
2616 #ifdef COMPAT_32BIT
2617 
2618 struct __aiocb_private32 {
2619  int32_t status;
2620  int32_t error;
2621  uint32_t kernelinfo;
2622 };
2623 
2624 typedef struct oaiocb32 {
2625  int aio_fildes; /* File descriptor */
2626  uint64_t aio_offset __packed; /* File offset for I/O */
2627  uint32_t aio_buf; /* I/O buffer in process space */
2628  uint32_t aio_nbytes; /* Number of bytes for I/O */
2629  struct osigevent32 aio_sigevent; /* Signal to deliver */
2630  int aio_lio_opcode; /* LIO opcode */
2631  int aio_reqprio; /* Request priority -- ignored */
2632  struct __aiocb_private32 _aiocb_private;
2633 } oaiocb32_t;
2634 
2635 typedef struct aiocb32 {
2636  int32_t aio_fildes; /* File descriptor */
2637  uint64_t aio_offset __packed; /* File offset for I/O */
2638  uint32_t aio_buf; /* I/O buffer in process space */
2639  uint32_t aio_nbytes; /* Number of bytes for I/O */
2640  int __spare__[2];
2641  uint32_t __spare2__;
2642  int aio_lio_opcode; /* LIO opcode */
2643  int aio_reqprio; /* Request priority -- ignored */
2644  struct __aiocb_private32 _aiocb_private;
2645  struct sigevent32 aio_sigevent; /* Signal to deliver */
2646 } aiocb32_t;
2647 
2648 static int
2649 convert_old_sigevent32(struct osigevent32 *osig, struct sigevent *nsig)
2650 {
2651 
2652  /*
2653  * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are
2654  * supported by AIO with the old sigevent structure.
2655  */
2656  CP(*osig, *nsig, sigev_notify);
2657  switch (nsig->sigev_notify) {
2658  case SIGEV_NONE:
2659  break;
2660  case SIGEV_SIGNAL:
2661  nsig->sigev_signo = osig->__sigev_u.__sigev_signo;
2662  break;
2663  case SIGEV_KEVENT:
2664  nsig->sigev_notify_kqueue =
2665  osig->__sigev_u.__sigev_notify_kqueue;
2666  PTRIN_CP(*osig, *nsig, sigev_value.sival_ptr);
2667  break;
2668  default:
2669  return (EINVAL);
2670  }
2671  return (0);
2672 }
2673 
2674 static int
2675 aiocb32_copyin_old_sigevent(struct aiocb *ujob, struct aiocb *kjob)
2676 {
2677  struct oaiocb32 job32;
2678  int error;
2679 
2680  bzero(kjob, sizeof(struct aiocb));
2681  error = copyin(ujob, &job32, sizeof(job32));
2682  if (error)
2683  return (error);
2684 
2685  CP(job32, *kjob, aio_fildes);
2686  CP(job32, *kjob, aio_offset);
2687  PTRIN_CP(job32, *kjob, aio_buf);
2688  CP(job32, *kjob, aio_nbytes);
2689  CP(job32, *kjob, aio_lio_opcode);
2690  CP(job32, *kjob, aio_reqprio);
2691  CP(job32, *kjob, _aiocb_private.status);
2692  CP(job32, *kjob, _aiocb_private.error);
2693  PTRIN_CP(job32, *kjob, _aiocb_private.kernelinfo);
2694  return (convert_old_sigevent32(&job32.aio_sigevent,
2695  &kjob->aio_sigevent));
2696 }
2697 
2698 static int
2699 aiocb32_copyin(struct aiocb *ujob, struct aiocb *kjob)
2700 {
2701  struct aiocb32 job32;
2702  int error;
2703 
2704  error = copyin(ujob, &job32, sizeof(job32));
2705  if (error)
2706  return (error);
2707  CP(job32, *kjob, aio_fildes);
2708  CP(job32, *kjob, aio_offset);
2709  PTRIN_CP(job32, *kjob, aio_buf);
2710  CP(job32, *kjob, aio_nbytes);
2711  CP(job32, *kjob, aio_lio_opcode);
2712  CP(job32, *kjob, aio_reqprio);
2713  CP(job32, *kjob, _aiocb_private.status);
2714  CP(job32, *kjob, _aiocb_private.error);
2715  PTRIN_CP(job32, *kjob, _aiocb_private.kernelinfo);
2716  return (convert_sigevent32(&job32.aio_sigevent, &kjob->aio_sigevent));
2717 }
2718 
2719 static long
2720 aiocb32_fetch_status(struct aiocb *ujob)
2721 {
2722  struct aiocb32 *ujob32;
2723 
2724  ujob32 = (struct aiocb32 *)ujob;
2725  return (fuword32(&ujob32->_aiocb_private.status));
2726 }
2727 
2728 static long
2729 aiocb32_fetch_error(struct aiocb *ujob)
2730 {
2731  struct aiocb32 *ujob32;
2732 
2733  ujob32 = (struct aiocb32 *)ujob;
2734  return (fuword32(&ujob32->_aiocb_private.error));
2735 }
2736 
2737 static int
2738 aiocb32_store_status(struct aiocb *ujob, long status)
2739 {
2740  struct aiocb32 *ujob32;
2741 
2742  ujob32 = (struct aiocb32 *)ujob;
2743  return (suword32(&ujob32->_aiocb_private.status, status));
2744 }
2745 
2746 static int
2747 aiocb32_store_error(struct aiocb *ujob, long error)
2748 {
2749  struct aiocb32 *ujob32;
2750 
2751  ujob32 = (struct aiocb32 *)ujob;
2752  return (suword32(&ujob32->_aiocb_private.error, error));
2753 }
2754 
2755 static int
2756 aiocb32_store_kernelinfo(struct aiocb *ujob, long jobref)
2757 {
2758  struct aiocb32 *ujob32;
2759 
2760  ujob32 = (struct aiocb32 *)ujob;
2761  return (suword32(&ujob32->_aiocb_private.kernelinfo, jobref));
2762 }
2763 
2764 static int
2765 aiocb32_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob)
2766 {
2767 
2768  return (suword32(ujobp, (long)ujob));
2769 }
2770 
2771 static struct aiocb_ops aiocb32_ops = {
2772  .copyin = aiocb32_copyin,
2773  .fetch_status = aiocb32_fetch_status,
2774  .fetch_error = aiocb32_fetch_error,
2775  .store_status = aiocb32_store_status,
2776  .store_error = aiocb32_store_error,
2777  .store_kernelinfo = aiocb32_store_kernelinfo,
2778  .store_aiocb = aiocb32_store_aiocb,
2779 };
2780 
2781 static struct aiocb_ops aiocb32_ops_osigevent = {
2782  .copyin = aiocb32_copyin_old_sigevent,
2783  .fetch_status = aiocb32_fetch_status,
2784  .fetch_error = aiocb32_fetch_error,
2785  .store_status = aiocb32_store_status,
2786  .store_error = aiocb32_store_error,
2787  .store_kernelinfo = aiocb32_store_kernelinfo,
2788  .store_aiocb = aiocb32_store_aiocb,
2789 };
2790 
2791 int
2792 compat32bit_aio_return(struct thread *td, struct compat32bit_aio_return_args *uap)
2793 {
2794 
2795  return (kern_aio_return(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops));
2796 }
2797 
2798 int
2799 compat32bit_aio_suspend(struct thread *td, struct compat32bit_aio_suspend_args *uap)
2800 {
2801  struct timespec32 ts32;
2802  struct timespec ts, *tsp;
2803  struct aiocb **ujoblist;
2804  uint32_t *ujoblist32;
2805  int error, i;
2806 
2807  if (uap->nent < 0 || uap->nent > AIO_LISTIO_MAX)
2808  return (EINVAL);
2809 
2810  if (uap->timeout) {
2811  /* Get timespec struct. */
2812  if ((error = copyin(uap->timeout, &ts32, sizeof(ts32))) != 0)
2813  return (error);
2814  CP(ts32, ts, tv_sec);
2815  CP(ts32, ts, tv_nsec);
2816  tsp = &ts;
2817  } else
2818  tsp = NULL;
2819 
2820  ujoblist = uma_zalloc(aiol_zone, M_WAITOK);
2821  ujoblist32 = (uint32_t *)ujoblist;
2822  error = copyin(uap->aiocbp, ujoblist32, uap->nent *
2823  sizeof(ujoblist32[0]));
2824  if (error == 0) {
2825  for (i = uap->nent; i > 0; i--)
2826  ujoblist[i] = PTRIN(ujoblist32[i]);
2827 
2828  error = kern_aio_suspend(td, uap->nent, ujoblist, tsp);
2829  }
2830  uma_zfree(aiol_zone, ujoblist);
2831  return (error);
2832 }
2833 
2834 int
2835 compat32bit_aio_cancel(struct thread *td, struct compat32bit_aio_cancel_args *uap)
2836 {
2837 
2838  return (sys_aio_cancel(td, (struct aio_cancel_args *)uap));
2839 }
2840 
2841 int
2842 compat32bit_aio_error(struct thread *td, struct compat32bit_aio_error_args *uap)
2843 {
2844 
2845  return (kern_aio_error(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops));
2846 }
2847 
2848 int
2849 compat32bit_oaio_read(struct thread *td, struct compat32bit_oaio_read_args *uap)
2850 {
2851 
2852  return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
2853  &aiocb32_ops_osigevent));
2854 }
2855 
2856 int
2857 compat32bit_aio_read(struct thread *td, struct compat32bit_aio_read_args *uap)
2858 {
2859 
2860  return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
2861  &aiocb32_ops));
2862 }
2863 
2864 int
2865 compat32bit_oaio_write(struct thread *td, struct compat32bit_oaio_write_args *uap)
2866 {
2867 
2868  return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
2869  &aiocb32_ops_osigevent));
2870 }
2871 
2872 int
2873 compat32bit_aio_write(struct thread *td, struct compat32bit_aio_write_args *uap)
2874 {
2875 
2876  return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
2877  &aiocb32_ops));
2878 }
2879 
2880 int
2881 compat32bit_aio_waitcomplete(struct thread *td,
2882  struct compat32bit_aio_waitcomplete_args *uap)
2883 {
2884  struct timespec32 ts32;
2885  struct timespec ts, *tsp;
2886  int error;
2887 
2888  if (uap->timeout) {
2889  /* Get timespec struct. */
2890  error = copyin(uap->timeout, &ts32, sizeof(ts32));
2891  if (error)
2892  return (error);
2893  CP(ts32, ts, tv_sec);
2894  CP(ts32, ts, tv_nsec);
2895  tsp = &ts;
2896  } else
2897  tsp = NULL;
2898 
2899  return (kern_aio_waitcomplete(td, (struct aiocb **)uap->aiocbp, tsp,
2900  &aiocb32_ops));
2901 }
2902 
2903 int
2904 compat32bit_aio_fsync(struct thread *td, struct compat32bit_aio_fsync_args *uap)
2905 {
2906 
2907  return (kern_aio_fsync(td, uap->op, (struct aiocb *)uap->aiocbp,
2908  &aiocb32_ops));
2909 }
2910 
2911 int
2912 compat32bit_olio_listio(struct thread *td, struct compat32bit_olio_listio_args *uap)
2913 {
2914  struct aiocb **acb_list;
2915  struct sigevent *sigp, sig;
2916  struct osigevent32 osig;
2917  uint32_t *acb_list32;
2918  int error, i, nent;
2919 
2920  if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
2921  return (EINVAL);
2922 
2923  nent = uap->nent;
2924  if (nent < 0 || nent > AIO_LISTIO_MAX)
2925  return (EINVAL);
2926 
2927  if (uap->sig && (uap->mode == LIO_NOWAIT)) {
2928  error = copyin(uap->sig, &osig, sizeof(osig));
2929  if (error)
2930  return (error);
2931  error = convert_old_sigevent32(&osig, &sig);
2932  if (error)
2933  return (error);
2934  sigp = &sig;
2935  } else
2936  sigp = NULL;
2937 
2938  acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK);
2939  error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t));
2940  if (error) {
2941  free(acb_list32, M_LIO);
2942  return (error);
2943  }
2944  acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
2945  for (i = 0; i < nent; i++)
2946  acb_list[i] = PTRIN(acb_list32[i]);
2947  free(acb_list32, M_LIO);
2948 
2949  error = kern_lio_listio(td, uap->mode,
2950  (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
2951  &aiocb32_ops_osigevent);
2952  free(acb_list, M_LIO);
2953  return (error);
2954 }
2955 
2956 int
2957 compat32bit_lio_listio(struct thread *td, struct compat32bit_lio_listio_args *uap)
2958 {
2959  struct aiocb **acb_list;
2960  struct sigevent *sigp, sig;
2961  struct sigevent32 sig32;
2962  uint32_t *acb_list32;
2963  int error, i, nent;
2964 
2965  if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
2966  return (EINVAL);
2967 
2968  nent = uap->nent;
2969  if (nent < 0 || nent > AIO_LISTIO_MAX)
2970  return (EINVAL);
2971 
2972  if (uap->sig && (uap->mode == LIO_NOWAIT)) {
2973  error = copyin(uap->sig, &sig32, sizeof(sig32));
2974  if (error)
2975  return (error);
2976  error = convert_sigevent32(&sig32, &sig);
2977  if (error)
2978  return (error);
2979  sigp = &sig;
2980  } else
2981  sigp = NULL;
2982 
2983  acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK);
2984  error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t));
2985  if (error) {
2986  free(acb_list32, M_LIO);
2987  return (error);
2988  }
2989  acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
2990  for (i = 0; i < nent; i++)
2991  acb_list[i] = PTRIN(acb_list32[i]);
2992  free(acb_list32, M_LIO);
2993 
2994  error = kern_lio_listio(td, uap->mode,
2995  (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
2996  &aiocb32_ops);
2997  free(acb_list, M_LIO);
2998  return (error);
2999 }
3000 
3001 #endif
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs, CTLFLAG_RW,&max_aio_procs, 0,"Maximum number of kernel threads to use for handling async IO ")
static eventhandler_tag exec_tag
Definition: vfs_aio.c:389
int(* store_error)(struct aiocb *ujob, long error)
Definition: vfs_aio.c:325
int kqueue_del_filteropts(int filt)
Definition: kern_event.c:906
static int max_aio_per_proc
Definition: vfs_aio.c:178
int syscall_helper_unregister(struct syscall_helper_data *sd)
int tvtohz(struct timeval *tv)
Definition: kern_clock.c:590
static void filt_aiodetach(struct knote *kn)
Definition: vfs_aio.c:2547
static int filt_lio(struct knote *kn, long hint)
Definition: vfs_aio.c:2609
static int aio_newproc(int *start)
Definition: vfs_aio.c:1216
int fd
Definition: kern_exec.c:199
#define TARGET_AIO_PROCS
Definition: vfs_aio.c:110
void(* aio_swake)(struct socket *, struct sockbuf *)
Definition: uipc_sockbuf.c:56
int kaio_ballowed_count
Definition: vfs_aio.c:294
int kaio_qallowed_count
Definition: vfs_aio.c:292
static struct filterops lio_filtops
Definition: vfs_aio.c:382
static int aiocb_copyin_old_sigevent(struct aiocb *ujob, struct aiocb *kjob)
Definition: vfs_aio.c:1437
void taskqueue_drain(struct taskqueue *queue, struct task *task)
void aio_init_aioinfo(struct proc *p)
Definition: vfs_aio.c:575
struct buf * buf
Definition: vfs_bio.c:97
int syscall_helper_register(struct syscall_helper_data *sd)
struct timespec * ts
Definition: clock_if.m:39
int mode
void *** start
Definition: linker_if.m:86
static void aio_swake_cb(struct socket *so, struct sockbuf *sb)
Definition: vfs_aio.c:1380
DECLARE_MODULE(aio, aio_mod, SI_SUB_VFS, SI_ORDER_ANY)
static int aiocb_store_status(struct aiocb *ujob, long status)
Definition: vfs_aio.c:1472
#define LIOJ_SIGNAL_POSTED
Definition: vfs_aio.c:281
void * malloc(unsigned long size, struct malloc_type *mtp, int flags)
Definition: kern_malloc.c:454
static long aiocb_fetch_status(struct aiocb *ujob)
Definition: vfs_aio.c:1458
void dev_strategy_csw(struct cdev *dev, struct cdevsw *csw, struct buf *bp)
Definition: vfs_bio.c:3771
struct oaiocb oaiocb_t
static int num_aio_resv_start
Definition: vfs_aio.c:163
#define MAX_AIO_QUEUE_PER_PROC
Definition: vfs_aio.c:98
void sigqueue_take(ksiginfo_t *ksi)
Definition: kern_sig.c:324
static int kern_aio_return(struct thread *td, struct aiocb *uaiocb, struct aiocb_ops *ops)
Definition: vfs_aio.c:1848
void panic(const char *fmt,...)
void bwillwrite(void)
Definition: vfs_bio.c:1409
void vn_finished_write(struct mount *mp)
Definition: vfs_vnops.c:1599
int(* store_status)(struct aiocb *ujob, long status)
Definition: vfs_aio.c:324
#define AIOD_TIMEOUT_DEFAULT
Definition: vfs_aio.c:118
static int aiod_timeout
Definition: vfs_aio.c:165
static long aiocb_fetch_error(struct aiocb *ujob)
Definition: vfs_aio.c:1465
static int filt_lioattach(struct knote *kn)
Definition: vfs_aio.c:2574
void knote(struct knlist *list, long hint, int lockflags)
Definition: kern_event.c:1806
void dev_relthread(struct cdev *dev, int ref)
Definition: kern_conf.c:249
#define JOBST_JOBQBUF
Definition: vfs_aio.c:90
int knlist_empty(struct knlist *knl)
Definition: kern_event.c:1927
int lioj_finished_count
Definition: vfs_aio.c:273
int alloc_unr(struct unrhdr *uh)
Definition: subr_unit.c:620
#define JOBST_JOBQGLOBAL
Definition: vfs_aio.c:87
int kaio_flags
Definition: vfs_aio.c:289
int(* store_kernelinfo)(struct aiocb *ujob, long jobref)
Definition: vfs_aio.c:326
static void aio_proc_rundown(void *arg, struct proc *p)
Definition: vfs_aio.c:721
static int target_aio_procs
Definition: vfs_aio.c:145
void kern_psignal(struct proc *p, int sig)
Definition: kern_sig.c:1975
static int aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
Definition: vfs_aio.c:1251
static int kern_aio_waitcomplete(struct thread *td, struct aiocb **aiocbp, struct timespec *ts, struct aiocb_ops *ops)
Definition: vfs_aio.c:2420
FEATURE(aio,"Asynchronous I/O")
static int aiocb_store_error(struct aiocb *ujob, long error)
Definition: vfs_aio.c:1479
int fget(struct thread *td, int fd, cap_rights_t rights, struct file **fpp)
static int kern_aio_fsync(struct thread *td, int op, struct aiocb *aiocbp, struct aiocb_ops *ops)
Definition: vfs_aio.c:2503
int * type
Definition: cpufreq_if.m:98
#define JOBST_NULL
Definition: vfs_aio.c:85
static int num_queue_count
Definition: vfs_aio.c:153
static int aiocb_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob)
Definition: vfs_aio.c:1493
void vunmapbuf(struct buf *bp)
Definition: vfs_bio.c:4410
static int unloadable
Definition: vfs_aio.c:173
size_t aio_nbytes
Definition: vfs_aio.c:195
#define JOBST_JOBQSOCK
Definition: vfs_aio.c:86
#define AIO_UNLOCK(ki)
Definition: vfs_aio.c:309
static void aio_kick_helper(void *context, int pending)
Definition: vfs_aio.c:1831
static int kern_aio_suspend(struct thread *td, int njoblist, struct aiocb **ujoblist, struct timespec *ts)
Definition: vfs_aio.c:1897
int(* copyin)(struct aiocb *ujob, struct aiocb *kjob)
Definition: vfs_aio.c:321
#define MAX_BUF_AIO
Definition: vfs_aio.c:114
static int aio_kick(struct proc *userp)
Definition: vfs_aio.c:1801
int aio_fildes
Definition: vfs_aio.c:192
#define JOBST_JOBQSYNC
Definition: vfs_aio.c:91
static void aio_kick_nowait(struct proc *userp)
Definition: vfs_aio.c:1783
static void aio_daemon(void *_id)
Definition: vfs_aio.c:1033
int sys_aio_return(struct thread *td, struct aio_return_args *uap)
Definition: vfs_aio.c:1887
static moduledata_t aio_mod
Definition: vfs_aio.c:417
int kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok)
Definition: kern_event.c:2239
#define DONE_BUF
int kqueue_add_filteropts(int filt, struct filterops *filtops)
Definition: kern_event.c:881
#define AIO_MTX(ki)
Definition: vfs_aio.c:311
static int num_buf_aio
Definition: vfs_aio.c:157
void p31b_setcfg(int num, int value)
Definition: posix4_mib.c:127
static int aio_unload(void)
Definition: vfs_aio.c:524
static void aio_proc_rundown_exec(void *arg, struct proc *p, struct image_params *imgp __unused)
Definition: vfs_aio.c:712
void kproc_exit(int ecode)
Definition: kern_kthread.c:141
#define MAX_AIO_QUEUE
Definition: vfs_aio.c:106
#define MAX_AIO_PER_PROC
Definition: vfs_aio.c:94
#define AIOCBLIST_CHECKSYNC
Definition: vfs_aio.c:254
void knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
Definition: kern_event.c:1909
int async_io_version
Definition: vfs_syscalls.c:111
int kaio_buffer_count
Definition: vfs_aio.c:295
int(* store_aiocb)(struct aiocb **ujobp, struct aiocb *ujob)
Definition: vfs_aio.c:327
static int num_aio_procs
Definition: vfs_aio.c:136
void taskqueue_free(struct taskqueue *queue)
int lioj_count
Definition: vfs_aio.c:272
long(* fetch_status)(struct aiocb *ujob)
Definition: vfs_aio.c:322
static void biohelper(void *context, int pending)
Definition: vfs_aio.c:2385
static struct semid_kernel * sema
Definition: sysv_sem.c:100
static int max_aio_queue_per_proc
Definition: vfs_aio.c:182
int aio_aqueue(struct thread *td, struct aiocb *job, struct aioliojob *lj, int type, struct aiocb_ops *ops)
Definition: vfs_aio.c:1524
MODULE_VERSION(aio, 1)
int kaio_maxactive_count
Definition: vfs_aio.c:290
static void aio_bio_done_notify(struct proc *userp, struct aiocblist *aiocbe, int type)
Definition: vfs_aio.c:959
#define DONE_QUEUE
static int filt_aioattach(struct knote *kn)
Definition: vfs_aio.c:2526
static struct aiocb_ops aiocb_ops_osigevent
Definition: vfs_aio.c:1509
static void filt_liodetach(struct knote *kn)
Definition: vfs_aio.c:2595
void crfree(struct ucred *cr)
Definition: kern_prot.c:1835
int sys_aio_suspend(struct thread *td, struct aio_suspend_args *uap)
Definition: vfs_aio.c:1956
#define AIOP_FREE
Definition: vfs_aio.c:259
#define LIOJ_KEVENT_POSTED
Definition: vfs_aio.c:282
#define LIOJ_SIGNAL
Definition: vfs_aio.c:280
#define JOBST_JOBRUNNING
Definition: vfs_aio.c:88
static void aio_physwakeup(struct buf *bp)
Definition: vfs_aio.c:2373
int sigev_findtd(struct proc *p, struct sigevent *sigev, struct thread **ttd)
Definition: kern_sig.c:1994
__FBSDID("$BSDSUniX$")
static int aio_free_entry(struct aiocblist *aiocbe)
Definition: vfs_aio.c:635
volatile void * aio_buf
Definition: vfs_aio.c:194
void knlist_add(struct knlist *knl, struct knote *kn, int islocked)
Definition: kern_event.c:1866
struct unrhdr * new_unrhdr(int low, int high, struct mtx *mutex)
Definition: subr_unit.c:325
int sys_aio_write(struct thread *td, struct aio_write_args *uap)
Definition: vfs_aio.c:2158
struct sigevent lioj_signal
Definition: vfs_aio.c:274
struct cdevsw * devvn_refthread(struct vnode *vp, struct cdev **devp, int *ref)
Definition: kern_conf.c:207
int sys_aio_cancel(struct thread *td, struct aio_cancel_args *uap)
Definition: vfs_aio.c:1986
int sys_setsid(register struct thread *td, struct setsid_args *uap)
Definition: kern_prot.c:336
static int aiod_lifetime
Definition: vfs_aio.c:169
struct ucred * crhold(struct ucred *cr)
Definition: kern_prot.c:1824
static struct aiocblist * aio_selectjob(struct aiothreadlist *aiop)
Definition: vfs_aio.c:808
int aio_reqprio
Definition: vfs_aio.c:198
int sys_olio_listio(struct thread *td, struct olio_listio_args *uap)
Definition: vfs_aio.c:2301
struct osigevent aio_sigevent
Definition: vfs_aio.c:196
int sys_oaio_read(struct thread *td, struct oaio_read_args *uap)
Definition: vfs_aio.c:2134
static eventhandler_tag exit_tag
Definition: vfs_aio.c:389
void free(void *addr, struct malloc_type *mtp)
Definition: kern_malloc.c:554
static uint64_t jobseqno
Definition: vfs_aio.c:83
static int max_queue_count
Definition: vfs_aio.c:149
#define MAX_AIO_PROCS
Definition: vfs_aio.c:102
static int aio_fsync_vnode(struct thread *td, struct vnode *vp)
Definition: vfs_aio.c:835
int printf(const char *fmt,...)
Definition: subr_prf.c:367
#define suword
Definition: imgact_elf.c:978
int sys_aio_error(struct thread *td, struct aio_error_args *uap)
Definition: vfs_aio.c:2126
int taskqueue_enqueue(struct taskqueue *queue, struct task *task)
static int filt_aio(struct knote *kn, long hint)
Definition: vfs_aio.c:2561
#define AIO_LOCK_ASSERT(ki, f)
Definition: vfs_aio.c:310
#define AIO_LOCK(ki)
Definition: vfs_aio.c:308
#define AIOCBLIST_BUFDONE
Definition: vfs_aio.c:252
static void aio_process(struct aiocblist *aiocbe)
Definition: vfs_aio.c:868
struct __aiocb_private _aiocb_private
Definition: vfs_aio.c:199
static int aiocb_store_kernelinfo(struct aiocb *ujob, long jobref)
Definition: vfs_aio.c:1486
int kproc_create(void(*func)(void *), void *arg, struct proc **newpp, int flags, int pages, const char *fmt,...)
Definition: kern_kthread.c:80
static int aio_onceonly(void)
Definition: vfs_aio.c:473
struct mtx kaio_mtx
Definition: vfs_aio.c:288
void mtx_init(struct mtx *m, const char *name, const char *type, int opts)
Definition: kern_mutex.c:837
void wakeup(void *ident)
Definition: kern_synch.c:378
static int kern_aio_error(struct thread *td, struct aiocb *aiocbp, struct aiocb_ops *ops)
Definition: vfs_aio.c:2085
static u_long jobrefid
Definition: vfs_aio.c:78
int aiothreadflags
Definition: vfs_aio.c:262
int vn_start_write(struct vnode *vp, struct mount **mpp, int flags)
Definition: vfs_vnops.c:1491
static int max_aio_procs
Definition: vfs_aio.c:131
static int aio_modload(struct module *module, int cmd, void *arg)
Definition: vfs_aio.c:397
off_t aio_offset
Definition: vfs_aio.c:193
int fget_read(struct thread *td, int fd, cap_rights_t rights, struct file **fpp)
int itimerfix(struct timeval *tv)
Definition: kern_time.c:817
int sys_aio_fsync(struct thread *td, struct aio_fsync_args *uap)
Definition: vfs_aio.c:2518
static int convert_old_sigevent(struct osigevent *osig, struct sigevent *nsig)
Definition: vfs_aio.c:1411
void sema_init(struct sema *sema, int value, const char *description)
Definition: kern_sema.c:48
int tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
Definition: kern_sig.c:2029
int vn_isdisk(struct vnode *vp, int *errp)
Definition: vfs_subr.c:3885
static MALLOC_DEFINE(M_LIO,"lio","listio aio control block list")
int kaio_count
Definition: vfs_aio.c:293
int fget_write(struct thread *td, int fd, cap_rights_t rights, struct file **fpp)
long(* fetch_error)(struct aiocb *ujob)
Definition: vfs_aio.c:323
void delete_unrhdr(struct unrhdr *uh)
Definition: subr_unit.c:347
#define KAIO_RUNDOWN
Definition: vfs_aio.c:313
#define AIOD_LIFETIME_DEFAULT
Definition: vfs_aio.c:122
int aio_lio_opcode
Definition: vfs_aio.c:197
int sys_lio_listio(struct thread *td, struct lio_listio_args *uap)
Definition: vfs_aio.c:2338
void mtx_destroy(struct mtx *m)
Definition: kern_mutex.c:884
#define KAIO_WAKEUP
Definition: vfs_aio.c:314
int sys_aio_read(struct thread *td, struct aio_read_args *uap)
Definition: vfs_aio.c:2142
TASKQUEUE_DEFINE_THREAD(aiod_bio)
int lioj_flags
Definition: vfs_aio.c:271
#define JOBST_JOBFINISHED
Definition: vfs_aio.c:89
static int max_buf_aio
Definition: vfs_aio.c:187
static int aiocb_copyin(struct aiocb *ujob, struct aiocb *kjob)
Definition: vfs_aio.c:1451
static SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0,"Async IO management")
int kaio_active_count
Definition: vfs_aio.c:291
void free_unr(struct unrhdr *uh, u_int item)
Definition: subr_unit.c:872
static TAILQ_HEAD(aiothreadlist)
Definition: vfs_aio.c:330
void knlist_init_mtx(struct knlist *knl, struct mtx *lock)
Definition: kern_event.c:1995
int sys_oaio_write(struct thread *td, struct oaio_write_args *uap)
Definition: vfs_aio.c:2150
static int aio_sendsig(struct proc *p, struct sigevent *sigev, ksiginfo_t *ksi)
Definition: vfs_aio.c:611
static int kern_lio_listio(struct thread *td, int mode, struct aiocb *const *uacb_list, struct aiocb **acb_list, int nent, struct sigevent *sig, struct aiocb_ops *ops)
Definition: vfs_aio.c:2165
int vmapbuf(struct buf *bp, int mapbuf)
Definition: vfs_bio.c:4374
#define AIOCBLIST_DONE
Definition: vfs_aio.c:251
int sys_aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap)
Definition: vfs_aio.c:2485
void sema_destroy(struct sema *sema)
Definition: kern_sema.c:63
static struct syscall_helper_data aio_syscalls[]
Definition: vfs_aio.c:423
int hz
Definition: subr_param.c:84