/* @(#)kern_exec.c 1.1 92/07/30 SMI; from UCB 7.1 86/06/05 */
/*
* Copyright (c) 1989 by Sun Microsystems, Inc.
*/
#include <machine/reg.h>
#include <machine/pte.h>
#include <machine/psl.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/user.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/socketvar.h>
#include <sys/vnode.h>
#include <sys/pathname.h>
#include <sys/vm.h>
#include <sys/file.h>
#include <sys/uio.h>
#include <sys/acct.h>
#include <sys/vfs.h>
#include <sys/asynch.h>
#include <sys/syslog.h>
#include <sys/trace.h>
#include <vm/hat.h>
#include <vm/anon.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_vn.h>
#include <machine/seg_kmem.h>
#ifdef sparc
#include <machine/asm_linkage.h>
#endif sparc
#ifdef sun4m
#ifndef MULTIPROCESSOR
#define cpuid (0)
#else MULTIPROCESSOR
extern int cpuid;
#endif MULTIPROCESSOR
#endif sun4m
/*
* If the PREREAD(size) macro evaluates true, then we will preread in
* the given text or data a.out segment even though the file is ZMAGIC.
*/
#define PREREAD(size) \
	((int)btopr(size) < (int)(freemem - minfree) && btopr(size) < pgthresh)
int pgthresh = btopr(PGTHRESH); /* maximum preread size */
/* Size in bytes of each chunk of kernel VM used to stage exec arguments. */
#define ARG_HUNKSIZE 0x4000
/*
 * One link in the chain of argument-staging buffers.  a_buf fills the
 * remainder of the hunk after the link pointer, so each whole struct
 * is exactly ARG_HUNKSIZE bytes.
 */
struct arg_hunk {
	struct arg_hunk *a_next;	/* next hunk in the chain, or NULL */
	char a_buf[ARG_HUNKSIZE - sizeof (struct arg_hunk *)];
};
/* States of args_get()'s static argsget_flag serialization variable. */
#define ARGS_GET_IN_PROGRESS 0x01	/* an allocation is underway */
#define ARGS_GET_WAITING 0x02	/* at least one process is asleep waiting */
/*
* Allocate new struct arg_hunk in the args virtual address area as
* pageable data and return a pointer to the new area after linking
* the new area on the arg_hunk struct pointer passed in.
*/
/*
 * Allocate one struct arg_hunk of pageable kernel virtual memory in the
 * exec argument area.  If 'a' is non-NULL the new hunk is chained onto
 * a->a_next.  Returns the new hunk, or NULL with u.u_error set
 * (E2BIG if no hole could be found, or the as_map error) on failure.
 */
static struct arg_hunk *
args_get(a)
	struct arg_hunk *a;
{
	struct arg_hunk *na;
	addr_t base;
	u_int len;
	static char argsget_flag = 0;	/* ARGS_GET_* serialization state */

	base = Syslimit;	/* start of virtual arg space area */
	len = NCARGS;	/* size of the region searched for a hole */
	/*
	 * There is a possible race here because we could be put to sleep
	 * between the as_hole call and the portion of the as_map call
	 * that locks down the space for us.  Thus, two
	 * processes could both end up getting the same value back from
	 * as_hole.  The solution is to only let one process through at
	 * a time.
	 */
	while (argsget_flag & ARGS_GET_IN_PROGRESS) {
		/* Ask the current holder to wake us when it is done. */
		argsget_flag |= ARGS_GET_WAITING;
		(void) sleep((caddr_t)&argsget_flag, PZERO - 1);
	}
	argsget_flag = ARGS_GET_IN_PROGRESS;
	/* Find an unused virtual range in the arg area big enough for one hunk. */
	if (as_hole(&kas,
	    sizeof (struct arg_hunk), &base, &len, AH_LO) != A_SUCCESS) {
		u.u_error = E2BIG;
		na = (struct arg_hunk *)NULL;
		goto out;
	}
	/* Map zero-fill-on-demand pages over the hole just found. */
	u.u_error = as_map(&kas, base, sizeof (struct arg_hunk), segvn_create,
	    kzfod_argsp);
	if (u.u_error) {
		na = (struct arg_hunk *)NULL;
		goto out;
	}
	na = (struct arg_hunk *)base;
	if (a != NULL)
		a->a_next = na;
out:
	/*
	 * End of locked region.  Wake every sleeper; each one re-checks
	 * the IN_PROGRESS bit and, if it loses the next race, re-sets
	 * ARGS_GET_WAITING before sleeping again, so clearing the flag
	 * wholesale below is safe.
	 */
	if (argsget_flag & ARGS_GET_WAITING) {
		wakeup((caddr_t)&argsget_flag);
	}
	argsget_flag = 0;
	return (na);
}
static void
args_free(a)
register struct arg_hunk *a;
{
register struct arg_hunk *ta;
while ((ta = a) != NULL) {
a = ta->a_next;
if (as_unmap(&kas, (addr_t)ta, sizeof (*ta)) != A_SUCCESS)
panic("args_free as_unmap");
}
}
/*
* exec system call, with and without environments.
*/
/* User-supplied arguments to exec, fetched from u.u_ap. */
struct execa {
	char *fname;	/* pathname of the file to execute */
	char **argp;	/* user-space argument vector */
	char **envp;	/* user-space environment vector (NULL for execv) */
};
execv()
{
((struct execa *)u.u_ap)->envp = NULL;
execve();
}
execve()
{
register int nc, error;
register char *cp;
register struct execa *uap;
register struct arg_hunk *args;
struct arg_hunk *args_start;
int na, ne, ucp, ap;
int indir, uid, gid;
char *sharg, *execnamep;
struct vnode *vp;
struct vattr vattr;
struct pathname pn;
char cfarg[SHSIZE], exnam[SHSIZE];
char *enp = &exnam[0];
int resid;
u_int len, cc;
extern void astop();
uap = (struct execa *)u.u_ap;
indir = 0;
uid = u.u_uid;
gid = u.u_gid;
args_start = NULL;
vp = NULL;
u.u_error = pn_get(uap->fname, UIO_USERSPACE, &pn);
if (u.u_error != 0)
return;
again:
error = lookuppn(&pn, FOLLOW_LINK, (struct vnode **)0, &vp);
if (error != 0)
goto bad;
if (vp->v_type != VREG) {
error = EACCES;
goto bad;
}
error = VOP_GETATTR(vp, &vattr, u.u_cred);
if (error != 0)
goto bad;
if (indir == 0) {
if (vp->v_vfsp->vfs_flag & VFS_NOSUID) {
if ((vattr.va_mode & (VSUID | VSGID)) != 0) {
log(LOG_WARNING,
"%s, uid %d: setuid execution not allowed\n", pn.pn_buf, uid);
}
} else {
if (vattr.va_mode & VSUID)
uid = vattr.va_uid;
if (vattr.va_mode & VSGID)
gid = vattr.va_gid;
}
}
/*
* XXX - should change VOP_ACCESS to not let super user
* always have it for exec permission on regular files.
*/
if (error = VOP_ACCESS(vp, VEXEC, u.u_cred))
goto bad;
if ((u.u_procp->p_flag & STRC) &&
(error = VOP_ACCESS(vp, VREAD, u.u_cred)))
goto bad;
if ((vattr.va_mode & (VEXEC | (VEXEC>>3) | (VEXEC>>6))) == 0) {
error = EACCES;
goto bad;
}
/*
* Read in first few bytes of file for segment sizes, ux_mag:
* OMAGIC = plain executable
* NMAGIC = RO text
* ZMAGIC = demand paged RO text
* Also an ASCII line beginning with #! is
* the file name of a ``shell'' and arguments may be prepended
* to the argument list if given here.
*
* SHELL NAMES ARE LIMITED IN LENGTH.
*
* ONLY ONE ARGUMENT MAY BE PASSED TO THE SHELL FROM
* THE ASCII LINE.
*/
u.u_exdata.ux_shell[0] = '\0'; /* for zero length files */
error = vn_rdwr(UIO_READ, vp, (caddr_t)&u.u_exdata,
sizeof (u.u_exdata), 0, UIO_SYSSPACE, IO_UNIT, &resid);
if (error != 0)
goto bad;
if (resid > sizeof (u.u_exdata) - sizeof (u.u_exdata.Ux_A) &&
u.u_exdata.ux_shell[0] != '#') {
error = ENOEXEC;
goto bad;
}
switch (u.u_exdata.ux_mag) {
case OMAGIC:
u.u_exdata.ux_dsize += u.u_exdata.ux_tsize;
u.u_exdata.ux_tsize = 0;
break;
case ZMAGIC:
case NMAGIC:
break;
default:
if (u.u_exdata.ux_shell[0] != '#' ||
u.u_exdata.ux_shell[1] != '!' ||
indir) {
error = ENOEXEC;
goto bad;
}
cp = &u.u_exdata.ux_shell[2]; /* skip "#!" */
while (cp != &u.u_exdata.ux_shell[SHSIZE]) {
if (*cp == '\t')
*cp = ' ';
else if (*cp == '\n') {
*cp = '\0';
break;
}
cp++;
}
if (*cp != '\0') {
error = ENOEXEC;
goto bad;
}
cp = &u.u_exdata.ux_shell[2];
while (*cp == ' ')
cp++;
execnamep = cp;
while (*cp && *cp != ' ')
*enp++ = *cp++;
*enp = '\0';
cfarg[0] = '\0';
if (*cp) {
*cp++ = '\0';
while (*cp == ' ')
cp++;
if (*cp)
bcopy((caddr_t)cp, (caddr_t)cfarg, SHSIZE);
}
indir = 1;
VN_RELE(vp);
vp = NULL;
if (error = pn_set(&pn, execnamep))
goto bad;
goto again;
}
/*
* Copy arguments into pageable kernel virtual memory
*/
na = ne = nc = 0;
cc = 0;
args = NULL;
if (uap->argp) for (;;) {
ap = NULL;
sharg = NULL;
if (indir && na == 0) {
sharg = exnam;
ap = (int)sharg;
uap->argp++; /* ignore argv[0] */
} else if (indir && (na == 1 && cfarg[0])) {
sharg = cfarg;
ap = (int)sharg;
} else if (indir && (na == 1 || na == 2 && cfarg[0])) {
ap = (int)uap->fname;
} else if (uap->argp) {
ap = fuword((caddr_t)uap->argp);
uap->argp++;
}
if (ap == NULL && uap->envp) {
uap->argp = NULL;
if ((ap = fuword((caddr_t)uap->envp)) != NULL)
uap->envp++, ne++;
}
if (ap == NULL)
break;
na++;
if (ap == -1) {
error = EFAULT;
goto bad;
}
do {
if (cc == 0) {
args = args_get(args);
if (args == NULL) {
error = u.u_error;
goto bad;
}
if (args_start == NULL)
args_start = args;
cc = sizeof (args->a_buf);
cp = args->a_buf;
}
if (sharg) {
error = copystr(sharg, cp, cc, &len);
sharg += len;
} else {
error = copyinstr((caddr_t)ap, cp, cc, &len);
ap += len;
}
cp += len;
nc += len;
cc -= len;
} while (error == ENAMETOOLONG);
if (error != 0)
goto bad;
}
nc = roundup(nc, NBPW);
#ifdef sparc
/*
* Make sure user register windows are empty before attempting to
* make a new stack.
*/
flush_user_windows();
error = getxfile(vp, SA(nc + (na + 4)*NBPW) + sizeof (struct rwindow),
u