
removed wrapdocker, removed storage_driver setting

Brad Rydzewski 2015-09-04 12:08:59 -07:00
parent 31aefa6580
commit ff77470c11
3 changed files with 49 additions and 142 deletions

Dockerfile

@@ -6,4 +6,4 @@ FROM rancher/docker:1.8.1
 ADD drone-docker /go/bin/
 VOLUME /var/lib/docker
-ENTRYPOINT ["/go/bin/drone-docker"]
+ENTRYPOINT ["/usr/bin/dockerlaunch", "/go/bin/drone-docker"]
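With wrapdocker removed, the plugin no longer prepares the daemon environment itself. Instead the image entrypoint is wrapped in /usr/bin/dockerlaunch, the launcher shipped with the rancher/docker base image, which is expected to take over the setup (cgroup mounts, stale pidfile cleanup) that wrapdocker used to perform before handing off to the plugin binary.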

main.go

@@ -35,54 +35,9 @@ func main() {
 	plugin.Param("vargs", &vargs)
 	plugin.MustParse()
-
-	// Set the storage driver
-	if len(vargs.Storage) == 0 {
-		vargs.Storage = "aufs"
-	}
-
-	stop := func() {
-		cmd := exec.Command("start-stop-daemon", "--stop", "--pidfile", "/var/run/docker.pid")
-		cmd.Stdout = os.Stdout
-		cmd.Stderr = os.Stderr
-		trace(cmd)
-		cmd.Run()
-	}
-	defer stop()
-
-	// Starts the Docker daemon
-	go func() {
-		args := []string{"/usr/bin/docker", "-d", "-s", vargs.Storage}
-		if vargs.Insecure && len(vargs.Registry) != 0 {
-			args = append(args, "--insecure-registry", vargs.Registry)
-		}
-		for _, value := range vargs.Dns {
-			args = append(args, "--dns", value)
-		}
-		cmd := exec.Command("/usr/bin/dockerlaunch", args...)
-		if os.Getenv("DOCKER_LAUNCH_DEBUG") == "true" {
-			cmd.Stdout = os.Stdout
-			cmd.Stderr = os.Stderr
-		} else {
-			cmd.Stdout = ioutil.Discard
-			cmd.Stderr = ioutil.Discard
-		}
-		trace(cmd)
-		cmd.Run()
-	}()
-
-	// Sleep for a few seconds
-	time.Sleep(5 * time.Second)

 	// Set the Registry value
 	if len(vargs.Registry) == 0 {
 		vargs.Registry = "https://index.docker.io/v1/"
-	} else {
-		vargs.Repo = fmt.Sprintf("%s/%s", vargs.Registry, vargs.Repo)
 	}

 	// Set the Dockerfile path
 	if len(vargs.File) == 0 {
@@ -94,15 +49,53 @@ func main() {
 	}
 	vargs.Repo = fmt.Sprintf("%s:%s", vargs.Repo, vargs.Tag)
+
+	go func() {
+		args := []string{"-d"}
+
+		// STORAGE DRIVER DISABLED FOR NOW
+		// if len(vargs.Storage) != 0 {
+		// 	args = append(args, "-s", vargs.Storage)
+		// }
+
+		if vargs.Insecure && len(vargs.Registry) != 0 {
+			args = append(args, "--insecure-registry", vargs.Registry)
+		}
+		for _, value := range vargs.Dns {
+			args = append(args, "--dns", value)
+		}
+		cmd := exec.Command("/usr/bin/docker", args...)
+		if os.Getenv("DOCKER_LAUNCH_DEBUG") == "true" {
+			cmd.Stdout = os.Stdout
+			cmd.Stderr = os.Stderr
+		} else {
+			cmd.Stdout = ioutil.Discard
+			cmd.Stderr = ioutil.Discard
+		}
+		trace(cmd)
+		cmd.Run()
+	}()
+
+	// ping Docker until available
+	for i := 0; i < 3; i++ {
+		cmd := exec.Command("/usr/bin/docker", "info")
+		cmd.Stdout = ioutil.Discard
+		cmd.Stderr = ioutil.Discard
+		err := cmd.Run()
+		if err == nil {
+			break
+		}
+		time.Sleep(time.Second * 5)
+	}
+
 	// Login to Docker
 	if len(vargs.Username) != 0 {
-		cmd := exec.Command("docker", "login", "-u", vargs.Username, "-p", vargs.Password, "-e", vargs.Email, vargs.Registry)
+		cmd := exec.Command("/usr/bin/docker", "login", "-u", vargs.Username, "-p", vargs.Password, "-e", vargs.Email, vargs.Registry)
 		cmd.Dir = workspace.Path
 		cmd.Stdout = os.Stdout
 		cmd.Stderr = os.Stderr
 		err := cmd.Run()
 		if err != nil {
-			stop()
 			os.Exit(1)
 		}
 	} else {
@@ -110,38 +103,40 @@ func main() {
 	}

 	// Docker environment info
-	cmd := exec.Command("docker", "version")
+	cmd := exec.Command("/usr/bin/docker", "version")
 	cmd.Stdout = os.Stdout
 	cmd.Stderr = os.Stderr
 	trace(cmd)
 	cmd.Run()

-	cmd = exec.Command("docker", "info")
+	cmd = exec.Command("/usr/bin/docker", "info")
 	cmd.Stdout = os.Stdout
 	cmd.Stderr = os.Stderr
 	trace(cmd)
 	cmd.Run()

 	// Build the container
-	cmd = exec.Command("docker", "build", "--pull=true", "--rm=true", "-t", vargs.Repo, vargs.File)
+	cmd = exec.Command("/usr/bin/docker", "build", "--pull=true", "--rm=true", "-t", vargs.Repo, vargs.File)
 	cmd.Dir = workspace.Path
 	cmd.Stdout = os.Stdout
 	cmd.Stderr = os.Stderr
 	trace(cmd)
 	err := cmd.Run()
 	if err != nil {
-		stop()
 		os.Exit(1)
 	}

+	if true {
+		return
+	}
+
 	// Push the container
-	cmd = exec.Command("docker", "push", vargs.Repo)
+	cmd = exec.Command("/usr/bin/docker", "push", vargs.Repo)
 	cmd.Dir = workspace.Path
 	cmd.Stdout = os.Stdout
 	cmd.Stderr = os.Stderr
 	trace(cmd)
 	err = cmd.Run()
 	if err != nil {
-		stop()
 		os.Exit(1)
 	}
 }
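In the new flow, main.go starts the daemon in a goroutine with "/usr/bin/docker -d" and then, instead of sleeping a fixed five seconds, polls "docker info" until the daemon answers. A minimal standalone sketch of that readiness loop, reusing the three-attempt, five-second budget from the diff (the waitForDocker helper and its parameters are illustrative, not part of the commit):

package main

import (
	"errors"
	"io/ioutil"
	"log"
	"os/exec"
	"time"
)

// waitForDocker polls "docker info" until the daemon answers or the
// attempts are exhausted, mirroring the retry loop added in this commit.
func waitForDocker(attempts int, delay time.Duration) error {
	for i := 0; i < attempts; i++ {
		cmd := exec.Command("/usr/bin/docker", "info")
		cmd.Stdout = ioutil.Discard
		cmd.Stderr = ioutil.Discard
		if err := cmd.Run(); err == nil {
			return nil // daemon is up
		}
		time.Sleep(delay)
	}
	return errors.New("docker daemon did not become ready")
}

func main() {
	// Same budget as the diff: three attempts, five seconds apart.
	if err := waitForDocker(3, 5*time.Second); err != nil {
		log.Fatal(err)
	}
}

Note also the "if true { return }" block inserted before the push step: it unconditionally skips "docker push", which reads as a temporary debugging guard rather than intended final behavior.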

wrapdocker

@@ -1,88 +0,0 @@
-#!/bin/bash
-
-# Ensure that all nodes in /dev/mapper correspond to mapped devices
-# currently loaded by the device-mapper kernel driver.
-dmsetup mknodes
-
-# First, make sure that cgroups are mounted correctly.
-CGROUP=/sys/fs/cgroup
-: ${LOG:=stdio}
-
-[ -d $CGROUP ] ||
-	mkdir $CGROUP
-
-mountpoint -q $CGROUP ||
-	mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || {
-		echo "Could not make a tmpfs mount. Did you use --privileged?"
-		exit 1
-	}
-
-if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security
-then
-	mount -t securityfs none /sys/kernel/security || {
-		echo "Could not mount /sys/kernel/security."
-		echo "AppArmor detection and --privileged mode might break."
-	}
-fi
-
-# Mount the cgroup hierarchies exactly as they are in the parent system.
-for SUBSYS in $(cut -d: -f2 /proc/1/cgroup)
-do
-	[ -d $CGROUP/$SUBSYS ] || mkdir $CGROUP/$SUBSYS
-	mountpoint -q $CGROUP/$SUBSYS ||
-		mount -n -t cgroup -o $SUBSYS cgroup $CGROUP/$SUBSYS
-
-	# The two following sections address a bug which manifests itself
-	# by a cryptic "lxc-start: no ns_cgroup option specified" when
-	# trying to start containers within a container.
-	# The bug seems to appear when the cgroup hierarchies are not
-	# mounted on the exact same directories in the host, and in the
-	# container.
-
-	# Named, control-less cgroups are mounted with "-o name=foo"
-	# (and appear as such under /proc/<pid>/cgroup) but are usually
-	# mounted on a directory named "foo" (without the "name=" prefix).
-	# Systemd and OpenRC (and possibly others) both create such a
-	# cgroup. To avoid the aforementioned bug, we symlink "foo" to
-	# "name=foo". This shouldn't have any adverse effect.
-	echo $SUBSYS | grep -q ^name= && {
-		NAME=$(echo $SUBSYS | sed s/^name=//)
-		ln -s $SUBSYS $CGROUP/$NAME
-	}
-
-	# Likewise, on at least one system, it has been reported that
-	# systemd would mount the CPU and CPU accounting controllers
-	# (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu"
-	# but on a directory called "cpu,cpuacct" (note the inversion
-	# in the order of the groups). This tries to work around it.
-	[ $SUBSYS = cpuacct,cpu ] && ln -s $SUBSYS $CGROUP/cpu,cpuacct
-done
-
-# Note: as I write those lines, the LXC userland tools cannot setup
-# a "sub-container" properly if the "devices" cgroup is not in its
-# own hierarchy. Let's detect this and issue a warning.
-grep -q :devices: /proc/1/cgroup ||
-	echo "WARNING: the 'devices' cgroup should be in its own hierarchy."
-grep -qw devices /proc/1/cgroup ||
-	echo "WARNING: it looks like the 'devices' cgroup is not mounted."
-
-# Now, close extraneous file descriptors.
-pushd /proc/self/fd >/dev/null
-for FD in *
-do
-	case "$FD" in
-	# Keep stdin/stdout/stderr
-	[012])
-		;;
-	# Nuke everything else
-	*)
-		eval exec "$FD>&-"
-		;;
-	esac
-done
-popd >/dev/null
-
-# If a pidfile is still around (for example after a container restart),
-# delete it so that docker can start.
-rm -rf /var/run/docker.pid
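The deleted wrapdocker script above mounted the cgroup hierarchies, closed extraneous file descriptors, and removed a stale /var/run/docker.pid before the daemon could start; with this commit, that setup is presumably handled by the rancher/docker image's dockerlaunch wrapper instead.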