Deploy a static website to Nomad as a Go binary

Building a whole CI pipeline and a small Go program just to avoid a Docker container seems fun at first.. but looks kinda OP a few days later..

Anyway, if you'd like to serve your static site compiled into a Go binary via Nomad (HashiCorp's sane Kubernetes alternative [a hill I am willing to die on]), here you go!

Place main.go at the top level of your site repo, alongside the directory tree your static site generator generates into.

In my case that was zola, project name website -> website/main.go.

Replace public/public with the path where your static files end up (replace it everywhere: also after //go:embed!).
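
For orientation, the layout I'm assuming here (zola project living in public/, its build output in public/public/, everything else at the repo root) looks roughly like this:

website/
├── main.go          <- the Go wrapper below
├── nomad.hcl        <- the Nomad job further down
├── .gitlab-ci.yml   <- the CI pipeline further down
└── public/          <- zola project
    └── public/      <- zola build output, what //go:embed picks up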

package main

import (
    "embed"
    "fmt"
    "io/fs"
    "log"
    "net/http"
    "os"
)

// embed the generated site (everything under public/public) into the binary
//go:embed public/public
var staticFiles embed.FS

func main() {
    http.Handle("/", fsHandler())

    // Nomad exposes the allocated port as NOMAD_PORT_http;
    // fall back to 3000 when running outside Nomad.
    port := os.Getenv("NOMAD_PORT_http")
    if port == "" {
        port = "3000"
    }

    log.Printf("Listening on :%s...\n", port)
    err := http.ListenAndServe(fmt.Sprintf(":%s", port), nil)
    if err != nil {
        log.Fatal(err)
    }
}

// fsHandler strips the public/public prefix so the embedded files
// are served from the web root.
func fsHandler() http.Handler {
    sub, err := fs.Sub(staticFiles, "public/public")
    if err != nil {
        panic(err)
    }

    return http.FileServer(http.FS(sub))
}
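
To sanity-check the binary locally before wiring up CI (assuming zola; adjust to your generator), something along these lines should do:

cd public && zola build && cd ..    # regenerate public/public
go mod init website                 # once, if the repo has no go.mod yet
go build -o website.bin .           # embeds the freshly built site
NOMAD_PORT_http=8080 ./website.bin  # or leave the env var out to listen on :3000

Then curl http://localhost:8080/ from another shell.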

Set your CI pipeline to something along these lines: generate the site, compile it into a static Go binary, and ship it via S3 so Nomad can pull it.

---
variables:
  REPO_NAME: git.code.none/yournamehere/website
  GIT_SUBMODULE_STRATEGY: recursive
  GIT_DEPTH: 5

# before_script:
#  -

stages:
  - build
  - deploy

build:
  stage: build
  script:
    - cd public
    - zola build
    - cd ..
    - export GOOS=linux GOARCH=amd64 CGO_ENABLED=0
    - go build -a -trimpath -ldflags "-s -w -extldflags '-static'" -o website-$GOOS-$GOARCH.bin
    - tar -czvf website-$GOOS-$GOARCH.tgz website-$GOOS-$GOARCH.bin
    - mc mv website-$GOOS-$GOARCH.tgz s3/git-releases/
  rules:
    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $CI_COMMIT_TAG == null
    - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH && $CI_COMMIT_TAG == null
      # when: manual

deploy:
  stage: deploy
  needs: ["build"]
  script:
    # DEPLOY NOMAD staging
    - mc_user=$(echo "$MC_HOST_s3" | awk -F'[:/@]' '{print $4}'); mc_pass=$(echo "$MC_HOST_s3" | awk -F'[:/@]' '{print $5}'); mc_host=$(echo "$MC_HOST_s3" | awk -F'[:/@]' '{print $6}')
    - sed -i "s,MCUSER,$mc_user,g" nomad.hcl; sed -i "s,MCPASS,$mc_pass,g" nomad.hcl; sed -i "s,MCHOST,$mc_host,g" nomad.hcl
    - nomad validate nomad.hcl
    # nomad plan exits non-zero when it finds changes to apply; don't fail the job on that
    - nomad plan nomad.hcl 2>&1 | tee .plan.log || echo
    - nomad run nomad.hcl
  rules:
    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $CI_COMMIT_TAG == null
    - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH && $CI_COMMIT_TAG == null
      # when: manual
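
The awk gymnastics in the deploy stage assume MC_HOST_s3 is set in the MinIO client's usual alias form, https://ACCESS_KEY:SECRET_KEY@HOST, so splitting on [:/@] yields the three values that get sed'ed into nomad.hcl. Made-up example:

MC_HOST_s3=https://AKIAEXAMPLE:supersecret@s3.example.com
# fields after splitting on [:/@]:  $4=AKIAEXAMPLE  $5=supersecret  $6=s3.example.com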

And to have it mostly together.. here's the Nomad job file that actually runs your static-site-bin-blog-log, or whatever you end up calling it :)

job "website" {
  datacenters = ["dc"]
  type = "service"
  # new uuid on every run -> forces a fresh deployment even when nothing else changed
  meta {
    run_uuid = "${uuidv4()}"
  }
  update {
    max_parallel      = 1
    health_check      = "checks"
    min_healthy_time  = "10s"
    healthy_deadline  = "5m"
    progress_deadline = "10m"
    auto_revert       = true
    auto_promote      = true
    canary            = 1
    stagger           = "10s"
  }
  migrate {
    max_parallel = 1
    health_check = "checks"
    min_healthy_time = "10s"
    healthy_deadline = "5m"
  }
  group "website" {
    count = 1
    restart {
      attempts = 3
      interval = "30m"
      delay = "15s"
      mode = "fail"
    }
    service {
      tags = ["website"]
      name = "website"

      port = "http"

      check {
        type     = "http"
        path     = "/"
        interval = "30s"
        timeout  = "2s"
      }
    }
    network {
      # dynamic port; Nomad passes it to the binary as NOMAD_PORT_http
      port "http" {}
    }
    task "website" {
      driver = "exec"
      config {
        command = "website-linux-amd64.bin"
      }
      artifact {
        # MCHOST/MCUSER/MCPASS get swapped for real values by the sed step in CI
        source = "s3://MCHOST/git-releases/website-linux-amd64.tgz"
        options {
          aws_access_key_id     = "MCUSER"
          aws_access_key_secret = "MCPASS"
        }
      }
      resources {
        cpu    = 256
        memory = 256
      }
      logs {
        max_files     = 10
        max_file_size = 10
      }
    }
  }
}
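
Once the pipeline has run, the plain Nomad CLI is enough to poke at it (the alloc ID below is a placeholder):

nomad job status website
nomad alloc status <alloc-id>
nomad alloc logs <alloc-id> website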