Update dependency thegeeklab/hugo-geekblog to v0.7.0 - autoclosed #3

Closed
renovator wants to merge 265 commits from renovate/thegeeklab-hugo-geekblog-0.x into master
67 changed files with 6259 additions and 160 deletions


@ -35,3 +35,46 @@ OpenSuse
x86
systemd
Golang
[D|d]irenv
semver
CLI
PyPi
readme
SSL
Telegraf
OCSP
JSON
DockerHub
starlark
Graylog
Opensearch
Elasticsearch
EOL
lifecycle
ownCloud
Lidl
Schwarz
Kaufland
Barsinghausen
DHBW
ERP
IHK
Baden-Wuerttemberg
Cottbus
Tradfri
UI
Zigbee2MQTT
Zigbee
MQTT
Terraform
Terraforms
Backblaze
AES-256
base64
Univention
RPC
DNS
LDAP
OpenLDAP
UCS
UDM


@ -10,74 +10,83 @@ concurrency:
limit: 1
steps:
- name: generate
image: thegeeklab/alpine-tools
commands:
- make doc
- name: generate
image: thegeeklab/alpine-tools
commands:
- make doc
- name: markdownlint
image: thegeeklab/markdownlint-cli
commands:
- markdownlint 'content/**/*.md' 'README.md'
- name: markdownlint
image: thegeeklab/markdownlint-cli
commands:
- markdownlint 'content/**/*.md' 'README.md'
- name: spellcheck
image: node:lts-alpine
commands:
- npm install -g spellchecker-cli
- spellchecker --files 'content/**/*.md' 'README.md' -d .dictionary -p spell indefinite-article syntax-urls frontmatter --frontmatter-keys title tags --no-suggestions
environment:
FORCE_COLOR: true
NPM_CONFIG_LOGLEVEL: error
- name: spellcheck
image: thegeeklab/alpine-tools
commands:
- spellchecker --files 'content/**/*.md' 'README.md' -d .dictionary -p spell indefinite-article syntax-urls frontmatter --frontmatter-keys title tags --no-suggestions
environment:
FORCE_COLOR: true
NPM_CONFIG_LOGLEVEL: error
- name: testbuild
image: klakegg/hugo:0.74.3-ext-alpine
commands:
- hugo-official -b http://localhost/
- name: favicons
image: node:lts-alpine
commands:
- npm install > /dev/null
- npm run build
environment:
FORCE_COLOR: true
NPM_CONFIG_LOGLEVEL: error
- name: link-validation
image: thegeeklab/link-validator
commands:
- link-validator -ro
environment:
LINK_VALIDATOR_BASE_DIR: public/
- name: testbuild
image: thegeeklab/hugo:0.114.0
commands:
- hugo --panicOnWarning -b http://localhost:8000/
- name: build
image: klakegg/hugo:0.74.3-ext-alpine
commands:
- hugo-official
- name: link-validation
image: thegeeklab/link-validator
commands:
- "link-validator --color=always --rate-limit 10 --header '\"user-agent: curl\"' -e https://matrix.to.* -e https://www.nginx.com.*"
environment:
LINK_VALIDATOR_BASE_DIR: public/
LINK_VALIDATOR_RETRIES: 3
- name: beautify
image: node:lts-alpine
commands:
- npm install -g js-beautify
- html-beautify -r -f 'public/**/*.html'
environment:
FORCE_COLOR: true
NPM_CONFIG_LOGLEVEL: error
- name: build
image: thegeeklab/hugo:0.114.0
commands:
- hugo --panicOnWarning
- name: publish
image: plugins/s3-sync
settings:
access_key:
from_secret: s3_access_key
bucket: thegeeklab-root
delete: true
endpoint: https://sp.rknet.org
path_style: true
secret_key:
from_secret: s3_secret_access_key
source: public/
strip_prefix: public/
when:
ref:
- refs/heads/master
- refs/tags/**
- name: beautify
image: node:lts-alpine
commands:
- npm install -g js-beautify
- html-beautify -r -f 'public/**/*.html'
environment:
FORCE_COLOR: true
NPM_CONFIG_LOGLEVEL: error
- name: publish
image: thegeeklab/drone-s3-sync:2
settings:
access_key:
from_secret: s3_access_key
bucket: thegeeklab-root
delete: true
endpoint: https://sp.rknet.org
path_style: true
secret_key:
from_secret: s3_secret_access_key
source: public/
strip_prefix: public/
when:
ref:
- refs/heads/main
- refs/tags/**
trigger:
ref:
- refs/heads/master
- refs/tags/**
- refs/pull/**
- refs/heads/main
- refs/tags/**
- refs/pull/**
---
kind: pipeline
@ -88,32 +97,36 @@ platform:
arch: amd64
steps:
- name: matrix
image: plugins/matrix
settings:
homeserver:
from_secret: matrix_homeserver
password:
from_secret: matrix_password
roomid:
from_secret: matrix_roomid
template: "Status: **{{ build.status }}**<br/> Build: [{{ repo.Owner }}/{{ repo.Name }}]({{ build.link }}) ({{ build.branch }}) by {{ build.author }}<br/> Message: {{ build.message }}"
username:
from_secret: matrix_username
- name: matrix
image: thegeeklab/drone-matrix
settings:
homeserver:
from_secret: matrix_homeserver
password:
from_secret: matrix_password
roomid:
from_secret: matrix_roomid
template: "Status: **{{ .Build.Status }}**<br/> Build: [{{ .Repo.Owner }}/{{ .Repo.Name }}]({{ .Build.Link }}){{ if .Build.Branch }} ({{ .Build.Branch }}){{ end }} by {{ .Commit.Author }}<br/> Message: {{ .Commit.Message.Title }}"
username:
from_secret: matrix_username
when:
status:
- success
- failure
trigger:
ref:
- refs/heads/master
- refs/tags/**
- refs/heads/main
- refs/tags/**
status:
- success
- failure
- success
- failure
depends_on:
- build
- build
---
kind: signature
hmac: 12d9076542e8ce395c437f3c6dd303b1aeb9c64d792a04c13ac450f4f2148600
hmac: 73f94f8f757b935eb71b930d612d8c86d589a3e03cde8d2eaa2c40c1d3668397
...

.gitignore

@ -1,3 +1,16 @@
themes/
public/
resources/_gen/
# local environments
.swp
.env*
/dist/
/build/
/node_modules/
# auto-generated files
/themes/
/public/
/static/
/assets/sprites/
/resources/
# hugo
.hugo_build.lock


@ -1,8 +1,9 @@
*.html
.drone.yml
search*.js
_normalize.css
.lighthouseci/
themes/
static/js/
src/favicon/
list.json.json
/.lighthouseci/
/themes/
/static/js/
/src/favicon/
LICENSE

.prettierrc

@ -0,0 +1,14 @@
{
  "printWidth": 99,
  "singleQuote": false,
  "semi": false,
  "trailingComma": "none",
  "overrides": [
    {
      "files": ["*.html"],
      "options": {
        "parser": "go-template"
      }
    }
  ]
}

LICENSE

@ -1,20 +1,21 @@
The MIT License (MIT)
MIT License
Copyright (c) 2020 Robert Kaussow
Copyright (c) 2022 Robert Kaussow <mail@thegeeklab.de>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished
to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
The above copyright notice and this permission notice (including the next
paragraph) shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@ -1,8 +1,9 @@
# renovate: datasource=github-releases depName=thegeeklab/hugo-geekblog
THEME_VERSION := v0.6.1
THEME_VERSION := v0.23.7
THEME := hugo-geekblog
BASEDIR := .
THEMEDIR := $(BASEDIR)/themes
YEAR := $(shell date +"%Y")
.PHONY: all
all: doc
@ -17,4 +18,9 @@ doc-assets:
.PHONY: clean
clean:
rm -rf $(THEMEDIR) && \
rm -rf $(THEMEDIR)
.PHONY: hugo-new
hugo-new:
hugo new posts/$(YEAR)/$(title)/index.md ; \
mkdir -p content/posts/$(YEAR)/$(title)/images

archetypes/posts.md

@ -0,0 +1,13 @@
---
title: "{{ replace .Name "-" " " | title }}"
date: {{ .Date }}
authors:
  - robert-kaussow
tags: []
resources:
  - name: feature
    src: "images/feature.jpg"
    params:
      anchor: Center
      credits: ""
---


@ -7,6 +7,7 @@ pygmentsCodeFences: true
enableGitInfo: true
paginate: 5
enableEmoji: True
markup:
goldmark:
@ -16,6 +17,9 @@ markup:
startLevel: 1
endLevel: 9
permalinks:
posts: /posts/:year/:month/:title/
taxonomies:
author: authors
tag: tags
@ -83,7 +87,7 @@ params:
but mainly about topics from the Linux and Open Source world.
subtitle: Linux and Open Source blog
images:
- "thegeeklab_avatar.png"
- "socialmedia2.png"
geekblogToC: 3
geekblogAuthor: robert-kaussow


content/about/index.md

@ -0,0 +1,118 @@
---
title: About Me
resources:
- name: profile
src: "images/profile.jpg"
params:
credits: "[Angelina Litvin](https://unsplash.com/@linalitvina) on [Unsplash](https://unsplash.com/s/photos/writing)"
---
Hi, I'm Robert. On my private blog I write about everything that comes to my mind, but mainly about topics from the Linux and open source world. I'm a Linux Engineer, open source enthusiast and a self-hosting geek.
My Linux journey began almost 10 years ago; for more than half of that time, maintaining and managing Linux server deployments in professional environments has been my daily business.
---
## Experience
{{< columns size=small >}}
**[ownCloud](https://owncloud.com/)**\
System Administrator\
May 2019
<--->
ownCloud is an open-source file sync, share and content collaboration software. As a System Administrator at ownCloud, I'm responsible for the operation and availability of the company infrastructure. That also includes the Continuous Integration platform based on Drone CI, which is a fundamental component for the development of our products.
{{< /columns >}}
{{< columns size=small >}}
**[Schwarz IT](https://it.schwarz/en)**\
Linux Engineer\
Oct 2017 - Apr 2019
<--->
Schwarz IT is the central IT service provider of the Schwarz Group, whose main customers are the two retail divisions Lidl and Kaufland. During my work as part of the Linux Server team, one challenge was to design a test-driven infrastructure environment using Ansible and Ansible Tower for more than 4,000 Linux servers in three different data centers to replace the existing Puppet configuration management.
{{< /columns >}}
{{< columns size=small >}}
**Kaufland**\
First Level Support\
Jul 2013 - Aug 2014
<--->
While working in the First Level Support team of the Kaufland distribution center in Barsinghausen, the main task was to provide support for all problems related to the software and hardware environment of the employees and the retail ERP solution (SAP).
{{< /columns >}}
---
## Education
{{< columns size=small >}}
**BS Business Informatics**\
DHBW Baden-Wuerttemberg\
Sep 2014 - Sep 2017
<--->
Bachelor thesis: "Selection and prototypical implementation of a configuration management solution for Linux."
{{< /columns >}}
{{< columns size=small >}}
**IT specialist**\
IHK Cottbus\
Sep 2010 - Jun 2013
<--->
{{< /columns >}}
---
## Skills
{{< columns >}}
## Operations
<!-- prettier-ignore-start -->
<!-- spellchecker-disable -->
{{% progress title=Linux value=90 icon=geeklab_tux %}}
{{% progress title=Ansible value=100 icon=geeklab_ansible %}}
{{% progress title=Terraform value=80 icon=geeklab_terraform %}}
{{% progress title=Docker value=80 icon=geeklab_docker %}}
{{% progress title=Kubernetes value=15 icon=geeklab_kubernetes %}}
<!-- spellchecker-enable -->
<!-- prettier-ignore-end -->
<--->
## Development
<!-- prettier-ignore-start -->
<!-- spellchecker-disable -->
{{% progress title=Python value=90 icon=geeklab_python %}}
{{% progress title=Golang value=70 icon=geeklab_golang %}}
<!-- spellchecker-enable -->
<!-- prettier-ignore-end -->
{{< /columns >}}
---
## Contact
<!-- prettier-ignore-start -->
<!-- spellchecker-disable -->
{{< boxes "contact" >}}
{{< box size=large title=E-Mail icon=gblog_email >}}mail [ett] thegeeklab.de{{< /box >}}
{{< box size=large title=Matrix icon=gblog_matrix >}}[@xoxys:rknet.org](https://matrix.to/#/@xoxys:rknet.org){{< /box >}}
{{< box size=large title=XMPP icon=gblog_xmpp >}}xoxys\@trashserver.net{{< /box >}}
{{< box size=large title=Mastodon icon=gblog_mastodon >}}[@xoxys@social.tchncs.de](https://social.tchncs.de/@xoxys){{< /box >}}
{{< box size=large title=GitHub icon=gblog_github >}}[github.com/xoxys](https://github.com/xoxys){{< /box >}}
{{< box size=large title=Gitea icon=gblog_gitea >}}[gitea.rknet.org/xoxys](https://gitea.rknet.org/xoxys){{< /box >}}
{{< /boxes >}}
<!-- spellchecker-enable -->
<!-- prettier-ignore-end -->


@ -1,13 +0,0 @@
---
title: "Get in touch"
---
- E-Mail: <!-- spellchecker-disable -->mail [ett] thegeeklab.de<!-- spellchecker-enable -->
- Matrix: [@xoxys:rknet.org](https://matrix.to/#/@xoxys:rknet.org)
- XMPP: xoxys\@trashserver.net
## More online profiles
- GitHub: [github.com/xoxys](https://github.com/xoxys)
- Gitea: [gitea.rknet.org/xoxys](https://gitea.rknet.org/xoxys)
- Mastodon [@xoxys@social.tchncs.de](https://social.tchncs.de/@xoxys)


@ -1,6 +1,8 @@
---
title: "Ansible and the relations to the inventory"
date: 2020-08-03T22:45:00+02:00
aliases:
- /posts/ansible-and-the-relations-to-the-inventory/
authors:
- robert-kaussow
tags:
@ -22,7 +24,7 @@ I love Ansible and I'm pretty happy to have this configuration management soluti
I finished the first steps really quickly. After I had added a script for my home lab Proxmox VE host and a static inventory for a bunch of my Raspberries, I did a check with `ansible-inventory -i inventory/ --list` and everything seemed to be working as expected. All my hosts were listed and Ansible groups were applied as well. But here comes the catch: the test Playbook run failed, and it looked like Ansible was not able to find the required variables. What the heck...
{{< hint info >}}
{{< hint type=note >}}
**Pro tip**\
There is one thing you should keep in mind, variables are related to the inventory file(s) or Playbooks.
{{< /hint >}}



@ -1,6 +1,8 @@
---
title: "Color palettes on Game Boy Color and Advance"
date: 2020-09-15T21:45:00+02:00
aliases:
- /posts/color-palettes-on-gbc-and-gba/
authors:
- robert-kaussow
tags:


@ -1,6 +1,8 @@
---
title: "Create a static site hosting platform"
date: 2020-07-30T01:05:00+02:00
aliases:
- /posts/create-a-static-site-hosting-platform/
authors:
- robert-kaussow
tags:
@ -8,7 +10,7 @@ tags:
- Sysadmin
resources:
- name: feature
src: 'images/feature.jpg'
src: "images/feature.jpg"
params:
anchor: Center
credits: >
@ -17,8 +19,10 @@ resources:
---
There are a lot of static site generators out there, and users have a lot of possibilities to automate and continuously deploy static sites these days. Solutions like GitHub Pages or Netlify are free to use and easy to set up; even a cheap webspace could work. If one of these services is sufficient for your use case, you could stop reading at this point.
<!--more-->
As I wanted to have more control over such a setup and because it might be fun I decided to create my own service. Before we look into the setup details, lets talk about some requirements:
As I wanted to have more control over such a setup, and because it might be fun, I decided to create my own service. Before looking into the setup details, let's talk about some requirements:
- deploy multiple project documentation
- use git repository name as subdomain
@ -33,9 +37,9 @@ Of course, Minio could be removed from the stack but after a few tests, my perso
As a first step, install Minio and Nginx on a server; I will not cover the basic setup in this guide. To simplify the setup I will use a single server for Minio and Nginx, but it's also possible to split this into a Two-Tier architecture.
After the basic setup we need to create a Minio bucket e.g. `mydocs` using the Minio client command `mc mb local/mydocs`. To allow Nginx to access these bucket to deliver the pages without authentication we need to set a bucket policy `mc policy set download local/mydocs`. This policy will allow public read access. In theory, it should also be possible to add authentication headers to Nginx to server sites from private buckets but I have not tried that on my own.
After the basic setup, it's required to create a Minio bucket, e.g. `mydocs`, using the Minio client command `mc mb local/mydocs`. To allow Nginx to access this bucket and deliver the pages without authentication, the bucket policy `mc policy set download local/mydocs` needs to be set. This policy will allow public read access. In theory, it should also be possible to add authentication headers to Nginx to serve sites from private buckets, but I have not tried that myself.
Preparing the Minio bucket was the easy part, now we need to teach Nginx to rewrite the subdomains to sub-directories and deliver the sites properly. Let us assume we are still using `mydocs` as the base Minio bucket and `mydocs.com` as root domain. Here is how my current vHost configuration looks like:
Preparing the Minio bucket was the easy part; now Nginx needs to know how to rewrite the subdomains to sub-directories and properly deliver the sites. Let's assume `mydocs` is still used as the base Minio bucket and `mydocs.com` as the root domain. Here is what my current vHost configuration looks like:
<!-- prettier-ignore-start -->
<!-- markdownlint-disable -->
@ -90,13 +94,13 @@ server {
We will go through this configuration to understand how it works.
__*Lines 1-3*__ defines a backend, in this case it's the Minio server running on `localhost:61000`.
**_Lines 1-3_** define a backend, in this case the Minio server running on `localhost:61000`.
__*Lines 5-10*__ should also be straight forward, this block will redirect HTTP to HTTPS.
**_Lines 5-10_** should also be straightforward, this block will redirect HTTP to HTTPS.
__*Line 14*__ is where the magic starts. We are using a named regular expression to capture the first part of the subdomain and translate it into the bucket sub-directory. For a given URL like `demoproject.mydocs.com` Nginx will try to serve `mydocs/demoproject` from the Minio server. That's what __*Line 23*__ does. Some of you may notice that the used variable `${request_path}` is not defined in the vHost configuration.
**_Line 14_** is where the magic starts. A named regular expression is used to capture the first part of the subdomain and translate it into the bucket sub-directory. For a given URL like `demoproject.mydocs.com` Nginx will try to serve `mydocs/demoproject` from the Minio server. That's what **_Line 23_** does. Some of you may notice that the used variable `${request_path}` is not defined in the vHost configuration.
Right, we need to add another configuration snippet to the `nginx.conf`. But why do we need this variable at all? For me, that was the hardest part to solve. As the setup is using `proxy_pass` Nginx will *not* try to lookup `index.html` automatically. That's a problem because every folder will at least contain an `index.html`. In general, it's required to tell Nginx to rewrite the request URI to `/index.html` if the origin is a folder and ends with `/`. One way would be an `if` condition in the vHost configuration but such conditions are evil[^if-is-evil] in most cases and should be avoided if possible. Luckily there is a better option:
Right, another configuration snippet needs to be added to the `nginx.conf`. But why is this variable required at all? For me, that was the hardest part to solve. As the setup is using `proxy_pass`, Nginx will _not_ try to look up `index.html` automatically. That's a problem because every folder will at least contain an `index.html`. In general, it's required to tell Nginx to rewrite the request URI to `/index.html` if the origin is a folder and ends with `/`. One way would be an `if` condition in the vHost configuration, but such conditions are evil[^if-is-evil] in most cases and should be avoided if possible. Luckily there is a better option:
<!-- prettier-ignore-start -->
<!-- markdownlint-disable -->
@ -113,7 +117,7 @@ map $request_uri $request_path {
[Nginx maps](https://nginx.org/en/docs/http/ngx_http_map_module.html) are a solid way to create conditionals. In this example, `$request_uri` is set as input and `$request_path` as output. Each line between the braces is a condition. The first line simply applies `$request_uri` to the output variable if no other condition matches. The second condition applies `${request_uri}index.html` to the output variable if the input variable ends with a slash (and is therefore a directory).
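Reconstructed from the description above, a minimal sketch of such a map block could look like this:

```Nginx
map $request_uri $request_path {
    # Fallback: pass the request URI through unchanged.
    default $request_uri;
    # URIs ending with a slash are directories: append index.html.
    ~/$     ${request_uri}index.html;
}
```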
__*Line 38-41*__ of the vHost configuration tries to deliver the custom error page of your site and will fallback to the default Nginx error page.
**_Lines 38-41_** of the vHost configuration try to deliver the custom error page of your site and will fall back to the default Nginx error page.
We are done! Nginx should now be able to serve your static sites from a sub-directory of the Minio source bucket. I've been using it for a few weeks now and I'm really happy with the current setup.


@ -1,6 +1,8 @@
---
title: "Docker port publishing for localhost bindings"
date: 2020-09-08T22:15:00+02:00
aliases:
- /posts/docker-port-publishing-for-localhost-bindings/
authors:
- robert-kaussow
tags:
@ -13,7 +15,7 @@ resources:
params:
anchor: Center
credits: >
[Andy Li](https://unsplash.com/@andasta) on
Andy Li (@andasta)
[Unsplash](https://unsplash.com/s/photos/container)
---




@ -1,6 +1,8 @@
---
title: "How to modernize a Game Boy"
date: 2020-09-13T23:45:00+02:00
aliases:
- /posts/modernize-a-game-boy-advance/
authors:
- robert-kaussow
tags:


@ -1,6 +1,8 @@
---
title: "Run an ARM32 Docker daemon on ARM64 servers"
date: 2020-09-24T10:30:00+02:00
aliases:
- /posts/run-arm32-docker-daemon-on-arm64-servers/
authors:
- robert-kaussow
tags:



@ -1,13 +1,15 @@
---
title: "Welcome (back)"
date: 2020-07-21T23:00:08+02:00
aliases:
- /posts/welcome/
authors:
- robert-kaussow
tags:
- General
resources:
- name: feature
src: 'images/feature.jpg'
src: "images/feature.jpg"
params:
anchor: Center
credits: >
@ -16,7 +18,9 @@ resources:
---
As some former readers may have noticed, "geeklabor.de" has been renamed to "thegeeklab.de". Welcome back, nice to have you here again. If you are a first-time visitor, a very warm welcome goes to you as well. This is my private blog, where I write about everything that comes to my mind, but mainly about topics from the Linux and Open Source world.
<!--more-->
For those of you who are interested in the background of the blog migration, here you go:
- my old theme had to be reworked, [hugo-geekblog](https://github.com/thegeeklab/hugo-geekblog) was born



@ -0,0 +1,37 @@
---
title: "Toolbox 1: direnv"
date: 2021-05-17T21:24:00+01:00
aliases:
- /posts/toolbox-1-direnv/
authors:
- robert-kaussow
tags:
- Sysadmin
- Automation
resources:
- name: feature
src: "images/feature.jpg"
params:
anchor: Center
credits: >
[Fleur](https://unsplash.com/@yer_a_wizard) on
[Unsplash](https://unsplash.com/photos/dQf7RZhMOJU)
---
We all use many different tools every day, e.g. for our work, automation or better productivity. In the series "Toolbox" I would like to present applications that have made my day-to-day work much easier. All applications are free and open source software, developed by big tech companies as well as lovingly handcrafted hobby projects. If you also know an awesome tool that has changed your life, I would love to hear from you on [Mastodon](https://social.tchncs.de/@xoxys).
## direnv
The basic function of [direnv](https://direnv.net/) is pretty simple: it manages environment variables depending on the current directory you are in. This may not sound very helpful, but it can save a lot of work, especially if you have to deal with many different project environments. I use it a lot combined with Ansible. Depending on the deployment environment I need to set a different remote user, and the roles should be loaded from a different base directory depending on whether it is a test or production environment.
As long as the required configuration can be set by an environment variable, direnv can handle it. All you have to do is create a `.envrc` file in the directory where it should be loaded. A simple configuration could look like this:
```Shell
$ cat .envrc
export ANSIBLE_ROLES_PATH=/home/xoxys/devel/.roles/staging
export ANSIBLE_REMOTE_USER=project-1
```
Each time you create or modify an `.envrc` file, you must approve the modification by running `direnv allow`. This prevents direnv from loading a file that has been modified or created by another, possibly malicious, process. After you have approved the file, direnv will automatically load it when you navigate to the directory where your configuration is located. To make it even better, it doesn't have to be that exact directory: the file will also be loaded if you navigate directly to a sub-directory. When you leave the directory, your environment will be unloaded automatically as well.
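To illustrate the workflow, a shortened shell session could look like this (paths and direnv output are abbreviated):

```Shell
$ cd ~/devel/project-1
direnv: error .envrc is blocked. Run `direnv allow` to approve its content
$ direnv allow
direnv: loading .envrc
direnv: export +ANSIBLE_REMOTE_USER +ANSIBLE_ROLES_PATH
$ cd ~
direnv: unloading
```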
Direnv is written in Golang and available in the [repositories](https://direnv.net/docs/installation.html#from-system-packages) of most Linux distributions. Binary builds can be downloaded from the [GitHub Release](https://github.com/direnv/direnv/releases) page as well.



@ -0,0 +1,45 @@
---
title: "Toolbox 2: git-plus"
date: 2021-06-19T09:50:00+01:00
aliases:
- /posts/toolbox-2-git-plus/
authors:
- robert-kaussow
tags:
- Sysadmin
- Automation
resources:
- name: feature
src: "images/feature.jpg"
params:
anchor: Center
credits: >
[Fleur](https://unsplash.com/@yer_a_wizard) on
[Unsplash](https://unsplash.com/photos/dQf7RZhMOJU)
---
If you work with a lot of Git repositories on a regular basis, you're bound to run into the situation where you need to make changes to multiple repositories sooner or later. While it would be possible to run your Git commands in a shell loop over all repositories, it is often tedious to type the command or remember the correct syntax.
<!--more-->
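For comparison, such a loop could look like this (a flat directory of repositories is assumed):

```Shell
# Run the same Git command in every repository below the current directory.
for repo in */; do
  (cd "$repo" && git status --short)
done
```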
This is where [git-plus](https://github.com/tkrajina/git-plus) comes into play. It is a small collection of Git utilities that tries to simplify some common tasks:
- `git multi` execute a single Git command on multiple Git repositories
- `git relation` show a relation between two branches/commits/tags
- `git old-branches` find old/unused branches
- `git recent` list branches ordered by last commit time
- `git semver` list and create Git semver (semantic versioning) tags
But there is one command that I use every day now: `git multi`. The command works folder-based, which means you have to have all target repositories in one directory. There is no grouping feature at the moment; if you want to group some repositories, you would have to use multiple sub-directories. Since I also use [direnv](/posts/toolbox-1-direnv/), this folder-based workflow works very well for me. Repositories can be temporarily excluded with the CLI flag `-e reponame` or by a `.multigit_ignore` file in the parent directory (see the sketch after the steps below). Basically, `git multi` executes normal Git commands and is therefore quite easy to use. One example where the tool helps me is when I need to adjust the CI configuration in multiple repositories. Only a few steps are needed for this:
- Create a new branch: \
`git multi checkout -b "replace-ci-step"`
- Use your favorite search and replace tool to add your changes to all repositories
- Create a commit: \
`git multi add -A; git multi commit -m "ci: replace broken step in ci config"`
- Finally push your new branch: \
`git multi push origin replace-ci-step`
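And as mentioned above, single repositories can be excluded from any of these steps. A possible invocation could look like this (repository names are made up, and the flag placement is an assumption):

```Shell
$ ls ~/devel/projects            # all target repositories live in one directory
repo-a  repo-b  repo-c
$ cd ~/devel/projects
$ git multi -e repo-c status     # run `git status` everywhere except repo-c
```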
That's it. Simple, isn't it? Of course it doesn't work in all cases; especially for more complex and repository-specific changes it gets harder. But for generic changes like CI configurations, the current year in a copyright string or a license file, or globally used badges in the readme, it's really straightforward.
`git-plus` is written in Python and available on [PyPi](https://pypi.org/project/git-plus/).



@ -0,0 +1,84 @@
---
title: "Collect JSON metrics with Telegraf"
date: 2022-03-13T12:10:00+01:00
authors:
- robert-kaussow
tags:
- Sysadmin
resources:
- name: feature
src: "images/feature.jpg"
params:
anchor: Center
credits: >
[Nicholas Cappello](https://unsplash.com/@bash__profile) on
[Unsplash](https://unsplash.com/photos/Wb63zqJ5gnE)
---
Telegraf is a powerful, plugin-based metrics collector that also provides Prometheus compatible outputs. For various purposes, there are a number of input plugins that can collect metrics from various sources. Even more powerful are the processor plugins that allow metrics to be processed and manipulated as they pass through, and immediately output results based on the values they process. In this short blog post I'll explain how to fetch JSON metrics from the Docker registry API to track some data of a DockerHub repository.
<!--more-->
Even though Prometheus has become very popular, not all applications and APIs provide native Prometheus metrics. In this example, I'll focus on the repository metrics provided by `https://hub.docker.com/v2/repositories`. While the API displays the current pull numbers, storing this information in Prometheus has the advantage of providing time-based information, e.g. the pull frequency/ratio or pulls in a certain period of time.
To fetch metrics from an HTTP endpoint, the generic [HTTP input plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/http) can be used. This plugin allows collecting metrics from one or more HTTP(S) endpoints and also supports a lot of different [data formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md) not only [JSON](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/json).
By default, only numeric values are extracted from the collected JSON data. In the case of the Docker Registry API endpoint, the `status`, `star_count`, `pull_count`, and `collaborator_count` fields are used. This behavior can be customized by adding fields to the `json_string_fields` configuration option. Since Prometheus supports only numeric values for metrics, string fields are added as labels to each metric. In some cases, string fields contain useful information that can also be converted to numeric references. For example, status texts such as `OK` and `ERROR` can be converted to `0` and `1` and used as metrics in Prometheus. In the case of the Docker Registry API, I wanted to use the `last_updated` field as a dedicated metric in Prometheus instead of a label. Fortunately, the date time format used can be transformed into a numeric value by converting it to a Unix timestamp.
For simple type conversions, I would recommend always checking the available [converter processor](https://github.com/influxdata/telegraf/tree/master/plugins/processors/converter) first. Sadly, there is no such converter for date or date time values available yet, so I had to go another way and utilize the starlark processor. This processor calls a starlark function for each matched metric, allowing for custom programmatic metric processing. While starlark is a Python dialect and might look familiar, it only supports a very limited subset of the language, as explained in the [specification](https://github.com/google/starlark-go/blob/master/doc/spec.md). But it's powerful enough for what I wanted to do: convert the date time value in the format of `2006-01-02T15:04:05.999999Z` to a valid Unix timestamp. This is what the final configuration looks like:
```TOML
#jinja2: lstrip_blocks: True
[[inputs.http]]
  name_override = "dockerhub_respository"
  urls = [
    "https://hub.docker.com/v2/repositories/library/telegraf/"
  ]
  json_string_fields = [
    "last_updated",
    "name",
    "namespace",
    "repository_type",
  ]
  data_format = "json"

[[processors.starlark]]
  namepass = ["dockerhub_respository"]
  source = '''
load("time.star", "time")

def apply(metric):
    metric.fields["last_updated"] = time.parse_time(metric.fields["last_updated"], format="2006-01-02T15:04:05.999999Z").unix
    return metric
'''
```
To debug and test the Telegraf configuration it's useful to execute the binary with the `--test` flag:
```Plain
./usr/bin/telegraf --debug --test --config etc/telegraf/telegraf.conf --config-directory etc/telegraf/telegraf.d/
2022-03-12T17:01:07Z I! Starting Telegraf 1.21.4
2022-03-12T17:01:07Z I! Loaded inputs: http
2022-03-12T17:01:07Z I! Loaded aggregators:
2022-03-12T17:01:07Z I! Loaded processors: starlark
2022-03-12T17:01:07Z W! Outputs are not used in testing mode!
2022-03-12T17:01:07Z I! Tags enabled: host=localhost project=prometheus
[...]
> dockerhub_respository,host=localhost,name=telegraf,namespace=library,project=prometheus,repository_type=image,url=https://hub.docker.com/v2/repositories/library/telegraf/ collaborator_count=0,last_updated=1646267076i,pull_count=519804664,star_count=560,status=1 1647104469000000000
```
The final metrics generated by the Prometheus output plugin will look like this:
```Plain
dockerhub_respository_collaborator_count{host="localhost",name="telegraf",namespace="library",project="prometheus",repository_type="image",url="https://hub.docker.com/v2/repositories/library/telegraf/"} 0
dockerhub_respository_last_updated{host="localhost",name="telegraf",namespace="library",project="prometheus",repository_type="image",url="https://hub.docker.com/v2/repositories/library/telegraf/"} 1.646267076e+09
dockerhub_respository_pull_count{host="localhost",name="telegraf",namespace="library",project="prometheus",repository_type="image",url="https://hub.docker.com/v2/repositories/library/telegraf/"} 5.19802966e+08
dockerhub_respository_star_count{host="localhost",name="telegraf",namespace="library",project="prometheus",repository_type="image",url="https://hub.docker.com/v2/repositories/library/telegraf/"} 560
dockerhub_respository_status{host="localhost",name="telegraf",namespace="library",project="prometheus",repository_type="image",url="https://hub.docker.com/v2/repositories/library/telegraf/"} 1
```
That's it. The Telegraf HTTP input plugin is a very flexible but generic way to collect and transform JSON metrics from various sources. If that's still not powerful enough, you can pass the raw data fetched by the HTTP input plugin to the starlark processor and write your own functions to parse the input and extract the required information into metrics.
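A rough sketch of that approach (not my production setup): the input hands the raw body to starlark as a single string field, assuming the default measurement name `http` and Telegraf's `value` data format.

```TOML
[[inputs.http]]
  urls = [
    "https://hub.docker.com/v2/repositories/library/telegraf/"
  ]
  # Pass the raw response body through as one string field named "value".
  data_format = "value"
  data_type = "string"

[[processors.starlark]]
  namepass = ["http"]
  source = '''
load("json.star", "json")

def apply(metric):
    # Decode the raw JSON body and keep only the fields of interest.
    data = json.decode(metric.fields.pop("value"))
    metric.fields["pull_count"] = data["pull_count"]
    metric.fields["star_count"] = data["star_count"]
    return metric
'''
```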



@ -0,0 +1,49 @@
---
title: "How to (not) migrate Graylog to Opensearch"
date: 2022-07-14T14:45:51+02:00
authors:
- robert-kaussow
tags:
- sysadmin
resources:
- name: feature
src: "images/feature.jpg"
params:
anchor: Center
credits: >
[CHUTTERSNAP](https://unsplash.com/@chuttersnap) on
[Unsplash](https://unsplash.com/s/photos/fail)
---
Graylog is a centralized log management solution to capture, store and analyze log files in real-time. With the latest minor release 4.3, Graylog announced that it will no longer support Elasticsearch (ES) due to licensing and structural changes Elastic introduced in v7.11. For this reason, the last supported ES version is 7.10, which already reached EOL on May 11, 2022.
<!--more-->
Fortunately, [Graylog](https://go2docs.graylog.org) also knows this and recommends users to switch, even if it is currently not enforced and ES 7.10 continues to work for now <!-- spellchecker-disable -->[^elastic]<!-- spellchecker-enable -->
. As you usually don't want to operate software that no longer receives security updates, I started to look into a migration and prepared the Container and Ansible setup. My first mistake on this journey was to believe I could just use the latest Opensearch (OS) release. Had I read the documentation <!-- spellchecker-disable -->[^graylog]<!-- spellchecker-enable -->
more carefully, I would have saved myself a lot of trouble...
Anyway, the actual migration of the cluster from <!-- spellchecker-disable -->ES v7.10 to OS v2.1<!-- spellchecker-enable --> succeeded surprisingly smoothly. Well, almost; after all, I had to rewrite the complete Ansible role because OS 2.x has changed almost all configuration parameters and API calls :tada: But as you can imagine, everything exploded when trying to start Graylog again. Dang. Just downgrading Opensearch was not possible either, as the cluster and all indices had already been migrated successfully. To get it back into a working state, I decided to reset the entire cluster and restore the snapshots from the S3 backup repository before starting the next attempt, this time with a supported OS 1.x version :fingers_crossed: At least I have already completed the ES disaster recovery test for this year.
Lessons Learned:
- Read documentation/upgrade instructions more carefully
- Ensure to have a working backup
- Test your recovery process frequently to stay calm and comfortable in case of an emergency
- Test upgrades in a staging environment whenever possible
What annoys me a bit about the whole situation is the back and forth and the rather bad communication from Graylog in the past <!-- spellchecker-disable -->[^gl-github]<!-- spellchecker-enable -->
. Furthermore, the situation with Opensearch is not really better, as it is unclear whether e.g. version 1.3 is still supported or not, and general lifecycle information is still missing <!-- spellchecker-disable -->[^os-github]<!-- spellchecker-enable -->
.
<!-- spellchecker-disable -->
<!-- markdownlint-capture -->
<!-- markdownlint-disable -->
[^elastic]: https://www.graylog.org/post/graylog-to-add-support-for-opensearch
[^graylog]: https://go2docs.graylog.org/5-0/planning_your_deployment/upgrading_to_opensearch.htm
[^gl-github]: https://github.com/Graylog2/graylog2-server/issues/11804
[^os-github]: https://github.com/opensearch-project/project-website/issues/661
<!-- markdownlint-restore -->
<!-- spellchecker-enable -->



@ -0,0 +1,56 @@
---
title: "Manage Univention DNS with Terraform"
date: 2022-09-11T13:57:16+02:00
authors:
- robert-kaussow
tags:
- Sysadmin
- Automation
resources:
- name: feature
src: "images/feature.jpg"
params:
anchor: Center
credits: >
[Taylor Vick](https://unsplash.com/@tvick) on
[Unsplash](https://unsplash.com/s/photos/data-center)
---
Using [Terraform](/posts/2022/09/store-terraform-state-on-backblaze-s3/) is a great way to manage infrastructure as code. To map all the different types of resources in a deployment, Terraform uses plugins. Plugins are executable binaries written in Go that communicate with Terraform Core via an RPC interface. Each plugin provides an implementation for a specific service.
Sometimes there is no specific plugin for a service, but if the service provides a REST API, the generic [REST API provider](https://registry.terraform.io/providers/Mastercard/restapi/latest/docs) can be helpful. This time, I was looking for a way to manage DNS records on a Univention Corporate Server (UCS) using Terraform. The Univention Directory Manager (UDM) API is well documented and can be used with the REST API provider, but there are a few minor pitfalls to be aware of.
First of all, a basic provider configuration needs to be added to Terraform:
```Terraform
provider "restapi" {
uri = "https://univention.example.com/univention/udm/"
username = "myuser"
password = "secure-password"
id_attribute = "dn"
debug = true
create_returns_object = true
headers = {
accept = "application/json"
}
}
```
Ensure that a full URL to the UDM is used in the `uri` parameter. If `username` and `password` are used, the Terraform provider adds a basic authentication header. Alternatively, it is also possible to set other authentication headers manually. This is required to use e.g. token-based authentication instead of username and password. Terraform requires a unique ID for all objects under control. Because manually managing unique IDs is somewhat tedious, it is preferable to let the API handle it whenever possible. In the case of Univention, a basic understanding of object management is required. On a UCS, DNS objects are stored as OpenLDAP objects. As a distinguished name (DN) uniquely identifies an LDAP record, this attribute can perfectly be used as the `id_attribute` for the Terraform provider. But it is also important to set `create_returns_object=true`. This option tells the provider that each create operation (POST) will return an object. As a result, for creation events, the provider will parse the returned data, use the `id_attribute` field as a unique ID, and store the data along with the ID in Terraform's internal data structures. To finalize the provider configuration, the accept header needs to be set to `application/json`, otherwise the API will return HTML that cannot be parsed by the Terraform provider.
With this configuration, the `restapi_object` resource can now be used to manage DNS records:
```Terraform
resource "restapi_object" "ucs_server" {
path = "/dns/host_record/"
data = jsonencode({
"position" : "zoneName=example.com,cn=dns,dc=example,dc=com,
"properties" : {
"name" : "myhost",
"a" : [
192.168.0.10
],
}
})
}
```



@ -0,0 +1,27 @@
---
title: "SSL certificate monitoring pitfalls"
date: 2022-01-31T23:00:00+01:00
aliases:
- /posts/ssl-certificate-monitoring-pitfalls/
authors:
- robert-kaussow
tags:
- Sysadmin
- Today I learned
resources:
- name: feature
src: "images/feature.jpg"
params:
anchor: Center
credits: >
[Erik Mclean](https://unsplash.com/@introspectivedsgn) on
[Unsplash](https://unsplash.com/photos/cVJWdOncbm8)
---
Certificates are a fundamental part of the Internet's security. At least since Let's Encrypt, a free and automated Certificate Authority, started its service, SSL is used nearly everywhere. To avoid certificate issues and possible service outages, it's a good idea to monitor the SSL certificates used by your services, especially as Let's Encrypt certificates have a short lifetime of 90 days.
I'm using Prometheus to monitor my infrastructure, and for Prometheus there are multiple ways to get started. Most of the tutorials and posts on the internet cover the case of expired certificates, and it's pretty easy to achieve. I prefer to use Telegraf, a plugin-based metrics collector that also provides Prometheus compatible outputs, instead of dedicated Prometheus exporters. To monitor SSL certificates, I'm using the `x509_cert` input plugin of Telegraf, which provides a metric called `x509_cert_expiry` that can be utilized to write simple alerting rules. That's actually pretty cool already, as Prometheus will send out alerts a few weeks before the certificates expire in case there is a problem with the automatic renewal process.
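A minimal sketch of such a rule, assuming the expiry metric is exposed in seconds; the 14-day threshold and label names are illustrative:

```YAML
groups:
  - name: ssl
    rules:
      - alert: SSLCertExpiringSoon
        # x509_cert_expiry is the remaining certificate lifetime in seconds.
        expr: x509_cert_expiry < 14 * 86400
        for: 1h
        labels:
          severity: warning
        annotations:
          summary: "Certificate {{ $labels.source }} expires in less than 14 days"
```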
A week ago, Let's Encrypt informed affected users that they need to [revoke faulty certificates](https://community.letsencrypt.org/t/questions-about-renewing-before-TLS-ALPN-01-revocations/170449) issued and validated with the `TLS-ALPN-01` challenge. Even though I'm using `DNS-01` for almost all of my certificates, I also received a mail and started to look into it. Sadly, the notification mail only contained a "random" ACME registration ID, and I was not able to find the matching client. As mentioned, I don't really use `TLS-ALPN-01`, so I decided to stop the research and leave it to my monitoring to tell me which forgotten service is the evil one after the certificates were revoked. Nothing happened after the revocation, and the monitoring was not complaining. Good - well no, a user reported that one of the services was not reachable anymore, and of course this was the one missing client that was using `TLS-ALPN-01` verified certificates - dang. While the issue itself was easy to resolve by a forced renewal of the certificate, I was still wondering why the monitoring had not caught it.
Well, this was the first time that I had to deal with _revoked_ certificates instead of _expired_ certificates. To be honest, I never thought about the detection of revoked certificates in my monitoring setup before, and therefore this case wasn't covered. But it looks like a fix is not as straightforward as expected either. The used Telegraf input `x509_cert` is not able to detect revoked certificates yet, and the common Prometheus [`blackbox_exporter`](https://github.com/prometheus/blackbox_exporter/issues/6) also doesn't want to handle this case. The only way I have found so far is to use the [`ssl_exporter`](https://github.com/ribbybibby/ssl_exporter), which provides some revocation information for the certificates using OCSP. If you are already running multiple exporters, that might be the way to go for you. Personally, I prefer to handle as much as possible using Telegraf, so I might look into a [fix](https://github.com/influxdata/telegraf/issues/10550) for the `x509_cert` plugin during the next weeks. However, lessons learned :blue_book:



@ -0,0 +1,52 @@
---
title: "Store Terraform State on Backblaze S3"
date: 2022-09-04T20:47:53+02:00
authors:
- robert-kaussow
tags:
- Sysadmin
- Automation
resources:
- name: feature
src: "images/feature.jpg"
params:
anchor: Center
credits: >
[Wesley Tingey](https://unsplash.com/@wesleyphotography) on
[Unsplash](https://unsplash.com/s/photos/file)
---
Terraform is an open source infrastructure-as-code tool for creating, modifying, and extending infrastructure in a secure and predictable way. Terraform needs to store state about the managed infrastructure and configuration. This state is used by Terraform to map real-world resources to your configuration and track metadata. By default, this state is stored in a local file, but it can also be stored remotely.
Terraform supports multiple remote backend providers, including S3. I already use Backblaze for backups and have had good experiences with it. Since Backblaze also provides an S3 Compatible API, I wanted to use it for Terraform. How to use S3 as a state backend is well [documented](https://www.terraform.io/language/settings/backends/s3), but as the documentation is focused on Amazon S3, there are a few things to take care of. A basic working configuration will look like this:
```Terraform
terraform {
  backend "s3" {
    bucket                      = "my-bucket"
    key                         = "state.json"
    skip_credentials_validation = true
    skip_region_validation      = true
    endpoint                    = "https://s3.us-west-004.backblazeb2.com"
    region                      = "us-west-004"
    access_key                  = "0041234567899990000000004"
    secret_key                  = "K001abcdefgklmnopqrstuvw"
  }
}
```
It is required to enable `skip_credentials_validation` and `skip_region_validation` because Backblaze uses a different format for these values and the validation only covers Amazon S3. For security reasons, I would recommend setting at least the `access_key` and `secret_key` parameters as environment variables instead of writing them to the Terraform file. For the credentials, it is required to create an App Key on Backblaze. After creating the key, the `keyName` needs to be used as `access_key` and the `applicationKey` as `secret_key`.
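For example, the S3 backend picks up the standard AWS environment variables, so the keys can stay out of the Terraform files (the values below are the placeholders from above):

```Shell
export AWS_ACCESS_KEY_ID="0041234567899990000000004"
export AWS_SECRET_ACCESS_KEY="K001abcdefgklmnopqrstuvw"
terraform init
```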
That's basically all. If you want to go a step further, it is possible to save the state encrypted with [Server-Side Encryption](https://www.backblaze.com/b2/docs/server_side_encryption.html) (SSE). There are two options available. The first is `SSE-B2`, where the key is managed and stored by Backblaze, which is quite simple to configure. The second option is to use customer-managed keys. With this option, an AES-256 and base64 encoded key is used. To generate a proper key, the command `openssl rand -base64 32` can be used.
To enable encryption in the Terraform Provider, the configuration must be extended:
```Terraform
terraform {
  backend "s3" {
    ...
    encrypt          = true
    sse_customer_key = "fsRb1SXBjiUqBM0rw/YqvDixScWnDCZsK7BhnPTc93Y="
  }
}
```



@ -0,0 +1,112 @@
---
title: "Use Tradfri Shortcut Button with Home Assistant"
date: 2022-08-22T21:35:25+02:00
authors:
- robert-kaussow
tags:
- Automation
resources:
- name: feature
src: "images/feature.jpg"
params:
anchor: Center
credits: >
[Artem Bryzgalov](https://unsplash.com/@abrizgalov) on
[Unsplash](https://unsplash.com/s/photos/switch)
---
Sometimes it can be helpful if single actions or entire automations in your Home Automation setup can be triggered with a physical button and not only from the Web UI. I'm using Zigbee2MQTT as a generic Zigbee to MQTT bridge, as it supports a lot of different Zigbee devices and integrates flawlessly into Home Assistant. As I already have a lot of different Tradfri lamps in use, I bought a few of the Tradfri Shortcut Buttons (E1812).
<!--more-->
Pairing the buttons with the [Zigbee2MQTT](https://www.zigbee2mqtt.io) coordinator was simple as usual, but I had some trouble figuring out how to use them in [Home Assistant](https://www.home-assistant.io/docs). Zigbee2MQTT recommends using the [MQTT device triggers](https://www.zigbee2mqtt.io/guide/usage/integrations/home_assistant.html#via-mqtt-device-trigger-recommended), but the documentation wasn't that clear on this part.
The first thing to do is to figure out the available triggers of the device. Triggers are published to the MQTT topic `<discovery_prefix>/device_automation/` and you can subscribe to it to discover the messages. To subscribe to an MQTT topic, the `mosquitto_sub` CLI command can be used, e.g. `mosquitto_sub -h 192.168.0.1 -p 8883 -u user -P secure-password -t homeassistant/device_automation/#`, or the Home Assistant UI at **_Settings_** \/ **_Devices & Services_** \/ <!-- spellchecker-disable -->**_Integrations_**<!-- spellchecker-enable --> \/ **_MQTT_** \/ **_Configure_** \/ **_Listen to a topic_**.
After subscribing to the topic, remember that device triggers are not published until the event has been triggered at least once on the device. Afterwards the following triggers should be listed for the Tradfri Button:
```Json
{
  "automation_type": "trigger",
  "device": {
    "identifiers": [
      "zigbee2mqtt_0x84b112233445566"
    ],
    "manufacturer": "IKEA",
    "model": "TRADFRI shortcut button (E1812)",
    "name": "livingroom/switch_test",
    "sw_version": "2.3.080"
  },
  "payload": "off",
  "subtype": "off",
  "topic": "zigbee2mqtt/livingroom/switch_test/action",
  "type": "action"
}
{
  "automation_type": "trigger",
  "device": {
    "identifiers": [
      "zigbee2mqtt_0x84b112233445566"
    ],
    "manufacturer": "IKEA",
    "model": "TRADFRI shortcut button (E1812)",
    "name": "livingroom/switch_test",
    "sw_version": "2.3.080"
  },
  "payload": "brightness_stop",
  "subtype": "brightness_stop",
  "topic": "zigbee2mqtt/livingroom/switch_test/action",
  "type": "action"
}
{
  "automation_type": "trigger",
  "device": {
    "identifiers": [
      "zigbee2mqtt_0x84b112233445566"
    ],
    "manufacturer": "IKEA",
    "model": "TRADFRI shortcut button (E1812)",
    "name": "livingroom/switch_test",
    "sw_version": "2.3.080"
  },
  "payload": "brightness_move_up",
  "subtype": "brightness_move_up",
  "topic": "zigbee2mqtt/livingroom/switch_test/action",
  "type": "action"
}
{
  "automation_type": "trigger",
  "device": {
    "identifiers": [
      "zigbee2mqtt_0x84b112233445566"
    ],
    "manufacturer": "IKEA",
    "model": "TRADFRI shortcut button (E1812)",
    "name": "livingroom/switch_test",
    "sw_version": "2.3.080"
  },
  "payload": "on",
  "subtype": "on",
  "topic": "zigbee2mqtt/livingroom/switch_test/action",
  "type": "action"
}
```
Great, with this data determined, it's almost done. The only missing information is the `device_id`. I have not found a nice way to get this ID yet, but it is part of the URL after navigating to **_Settings_** \/ **_Devices & Services_** \/ **_Devices_** \/ **_\<device\>_**.
Finally, an action can be assigned to a button. The example below starts the vacuum cleaner on a `single_click` (`on`) trigger:
```YAML
- alias: Start Vacuum
  trigger:
    - platform: device
      domain: mqtt
      device_id: 61b011e2e7d5e111111d8d804a029f61
      type: action
      subtype: "on"
      discovery_id: 0x84b112233445566 action_on
  action:
    - service: vacuum.start
      target:
        entity_id: vacuum.valetudo_rockrobo
```

data/menu/extra.yml

@ -0,0 +1,5 @@
---
footer:
  - name: About
    icon: gblog_person
    ref: "/about"


@ -1,7 +1,20 @@
<link rel="apple-touch-icon" sizes="180x180" href="{{ "favicon/apple-touch-icon.png" | relURL }}">
<link rel="icon" type="image/png" sizes="32x32" href="{{ "favicon/favicon-32x32.png" | relURL }}">
<link rel="icon" type="image/png" sizes="16x16" href="{{ "favicon/favicon-16x16.png" | relURL }}">
<link rel="manifest" href="{{ "favicon/site.webmanifest" | relURL }}">
<link rel="mask-icon" href="{{ "favicon/safari-pinned-tab.svg" | relURL }}" color="#2f333e">
<meta name="msapplication-TileColor" content="#2f333e">
<meta name="theme-color" content="#2f333e">
<link rel="icon" type="image/svg+xml" href="{{ "favicon/favicon.svg" | relURL }}" />
<link
  rel="icon"
  type="image/png"
  sizes="48x48"
  href="{{ "favicon/favicon-32x32.png" | relURL }}"
/>
<link
  rel="icon"
  type="image/png"
  sizes="32x32"
  href="{{ "favicon/favicon-32x32.png" | relURL }}"
/>
<link
  rel="icon"
  type="image/png"
  sizes="16x16"
  href="{{ "favicon/favicon-16x16.png" | relURL }}"
/>
<link rel="manifest" href="{{ "favicon/manifest.json" | relURL }}" />


@ -1,8 +1,2 @@
User-agent: *
Disallow: /atom.xml
Disallow: /authors/*
Disallow: /tags/*
Disallow: /page/*
Sitemap: {{ "/sitemap.xml" | absURL }}

package-lock.json

File diff suppressed because it is too large.

package.json

@ -0,0 +1,39 @@
{
  "name": "the-geeklab",
  "version": "1.0.0",
  "description": "My personal blog",
  "main": "index.js",
  "scripts": {
    "build": "run-s prep svg build:*",
    "build:webpack": "webpack --mode=production",
    "start": "run-s prep:clean prep:make svg build:webpack ; run-s start:hugo",
    "start:hugo": "hugo server -D -F",
    "svg": "run-s svg:*",
    "svg:sprite": "svg-sprite -C svgsprite.config.json 'src/icons/*.svg'",
    "prep": "run-s prep:*",
    "prep:clean": "shx rm -rf build/ static/ assets/sprites/",
    "prep:make": "shx mkdir -p build/"
  },
  "repository": {
    "type": "git",
    "url": "https://github.com/thegeeklab/hugo-geekblog"
  },
  "author": "Robert Kaussow",
  "license": "MIT",
  "devDependencies": {
    "copy-webpack-plugin": "11.0.0",
    "favicons-webpack-plugin": "6.0.0",
    "js-yaml": "4.1.0",
    "npm-run-all": "4.1.5",
    "prettier": "2.8.8",
    "prettier-plugin-go-template": "0.0.13",
    "shx": "0.3.4",
    "svg-sprite": "2.0.2",
    "webpack": "5.88.1",
    "webpack-cli": "5.1.4",
    "webpack-favicons": "1.3.8"
  },
  "overrides": {
    "colors": "1.4.0"
  }
}


@ -1,23 +1,4 @@
{
"extends": ["config:base"],
"regexManagers": [
{
"fileMatch": ["^Makefile$"],
"matchStrings": [
"# renovate: datasource=(?<datasource>\\S+) depName=(?<depName>\\S+)( versioning=(?<versioning>.*?))?\\n.*?_VERSION := (?<currentValue>.*)\\s"
]
}
],
"packageRules": [
{
"datasources": ["github-releases"],
"paths": ["Makedile"],
"groupName": "hugo theme",
"packagePatterns": ["^thegeeklab"],
"automerge": true
}
],
"droneci": {
"enabled": false
}
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": ["github>thegeeklab/renovate-presets"]
}

src/dummy.js (empty file)

src/icons/ansible.svg

@ -0,0 +1,5 @@
<!-- Generated by IcoMoon.io -->
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" width="32" height="32" viewBox="0 0 32 32">
<title>ansible</title>
<path d="M14.156 15.297l6.248 4.927-4.136-10.216zM16 0c-8.839 0-16 7.161-16 16s7.161 16 16 16 16-7.161 16-16-7.161-16-16-16zM23.729 23.073c-0.015 0.628-0.537 1.123-1.167 1.107-0.315 0-0.555-0.12-0.885-0.391l-8.253-6.667-2.772 6.937h-2.397l6.996-16.807c0.165-0.419 0.569-0.693 1.019-0.675 0.435-0.019 0.84 0.252 0.989 0.675l6.365 15.325c0.060 0.148 0.107 0.312 0.107 0.464-0.001 0.012-0.001 0.012-0.001 0.031z"></path>
</svg>


src/icons/docker.svg

@ -0,0 +1,5 @@
<!-- Generated by IcoMoon.io -->
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" width="32" height="32" viewBox="0 0 32 32">
<title>docker</title>
<path d="M6.426 23.030c-0.912 0-1.739-0.747-1.739-1.654s0.747-1.658 1.74-1.658c0.997 0 1.747 0.747 1.747 1.656s-0.829 1.654-1.748 1.655zM27.778 14.012c-0.18-1.323-1-2.4-2.080-3.227l-0.42-0.333-0.339 0.413c-0.659 0.747-0.92 2.071-0.84 3.060 0.080 0.749 0.32 1.494 0.739 2.072-0.339 0.173-0.757 0.333-1.080 0.503-0.76 0.249-1.499 0.333-2.24 0.333h-21.389l-0.080 0.493c-0.16 1.576 0.080 3.227 0.749 4.721l0.325 0.58v0.080c2 3.311 5.561 4.801 9.438 4.801 7.46 0 13.578-3.227 16.478-10.179 1.9 0.083 3.819-0.413 4.721-2.235l0.24-0.413-0.4-0.249c-1.080-0.659-2.56-0.747-3.8-0.413zM17.099 12.689h-3.238v3.227h3.24v-3.23zM17.099 8.631h-3.238v3.227h3.24v-3.223zM17.099 4.492h-3.238v3.227h3.24v-3.227zM21.060 12.689h-3.219v3.227h3.227v-3.23zM9.061 12.689h-3.218v3.227h3.23v-3.23zM13.102 12.689h-3.2v3.227h3.219v-3.23zM5.061 12.689h-3.195v3.227h3.238v-3.23zM13.102 8.631h-3.2v3.227h3.219v-3.223zM9.041 8.631h-3.192v3.227h3.218v-3.223z"></path>
</svg>

5
src/icons/golang.svg Normal file
View File

@ -0,0 +1,5 @@
<!-- Generated by IcoMoon.io -->
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" width="32" height="32" viewBox="0 0 32 32">
<title>golang</title>
<path d="M2.415 13.642c-0.063 0-0.077-0.031-0.047-0.079l0.328-0.42c0.031-0.047 0.108-0.077 0.171-0.077h5.562c0.061 0 0.077 0.047 0.047 0.093l-0.265 0.404c-0.031 0.048-0.109 0.093-0.156 0.093zM0.063 15.075c-0.063 0-0.079-0.031-0.047-0.077l0.327-0.421c0.031-0.047 0.109-0.077 0.172-0.077h7.104c0.063 0 0.093 0.047 0.077 0.093l-0.124 0.373c-0.016 0.063-0.077 0.093-0.14 0.093zM3.834 16.509c-0.063 0-0.079-0.047-0.047-0.093l0.217-0.389c0.031-0.047 0.093-0.093 0.156-0.093h3.116c0.063 0 0.093 0.047 0.093 0.109l-0.031 0.373c0 0.063-0.063 0.109-0.109 0.109zM20.005 13.362c-0.981 0.249-1.652 0.436-2.617 0.685-0.235 0.061-0.249 0.077-0.453-0.156-0.232-0.265-0.404-0.436-0.731-0.592-0.983-0.483-1.933-0.343-2.82 0.233-1.060 0.685-1.605 1.699-1.589 2.96 0.015 1.247 0.872 2.275 2.103 2.447 1.060 0.14 1.947-0.233 2.649-1.028 0.14-0.172 0.264-0.359 0.42-0.577h-3.006c-0.327 0-0.405-0.203-0.296-0.467 0.203-0.483 0.576-1.293 0.795-1.699 0.067-0.148 0.214-0.249 0.384-0.249 0.002 0 0.004 0 0.006 0h5.67c-0.031 0.421-0.031 0.841-0.093 1.263-0.174 1.165-0.627 2.201-1.289 3.069l0.012-0.016c-1.121 1.48-2.587 2.4-4.44 2.648-1.527 0.203-2.945-0.093-4.19-1.027-1.153-0.873-1.808-2.027-1.979-3.46-0.203-1.699 0.296-3.225 1.324-4.565 1.107-1.448 2.571-2.368 4.362-2.695 1.464-0.265 2.867-0.093 4.128 0.763 0.827 0.547 1.417 1.293 1.808 2.197 0.093 0.14 0.031 0.219-0.156 0.265zM25.162 21.978c-1.419-0.032-2.712-0.437-3.802-1.372-0.883-0.748-1.494-1.793-1.679-2.979l-0.004-0.027c-0.28-1.76 0.203-3.319 1.263-4.705 1.137-1.496 2.508-2.275 4.362-2.601 1.589-0.28 3.085-0.125 4.44 0.795 1.231 0.84 1.995 1.979 2.197 3.473 0.264 2.104-0.343 3.817-1.792 5.282-1.028 1.044-2.291 1.697-3.74 1.993-0.42 0.079-0.84 0.093-1.245 0.141zM28.869 15.683c-0.015-0.203-0.015-0.359-0.045-0.515-0.28-1.543-1.699-2.415-3.178-2.072-1.449 0.327-2.384 1.247-2.727 2.711-0.28 1.216 0.312 2.447 1.433 2.945 0.857 0.373 1.713 0.327 2.54-0.093 1.231-0.64 1.9-1.636 1.977-2.976z"></path>
</svg>

5
src/icons/kubernetes.svg Normal file

File diff suppressed because one or more lines are too long

5
src/icons/python.svg Normal file
View File

@ -0,0 +1,5 @@
<!-- Generated by IcoMoon.io -->
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" width="32" height="32" viewBox="0 0 32 32">
<title>python</title>
<path d="M18.985 0.319l1.194 0.265 0.968 0.345 0.783 0.398 0.597 0.425 0.451 0.451 0.332 0.451 0.212 0.438 0.133 0.398 0.053 0.345 0.027 0.265-0.013 0.172v7.085l-0.066 0.836-0.172 0.73-0.279 0.61-0.345 0.504-0.398 0.411-0.438 0.332-0.464 0.252-0.464 0.186-0.438 0.133-0.398 0.093-0.345 0.053-0.279 0.027h-7.92l-0.915 0.066-0.783 0.186-0.663 0.292-0.544 0.358-0.438 0.425-0.358 0.464-0.265 0.478-0.199 0.491-0.133 0.464-0.093 0.425-0.053 0.358-0.027 0.279v4.060h-2.959l-0.279-0.040-0.371-0.093-0.425-0.159-0.464-0.239-0.478-0.345-0.478-0.478-0.464-0.61-0.425-0.783-0.371-0.968-0.279-1.167-0.186-1.393-0.066-1.632 0.080-1.619 0.212-1.38 0.318-1.154 0.425-0.942 0.478-0.756 0.531-0.584 0.557-0.438 0.557-0.318 0.531-0.212 0.478-0.133 0.425-0.066 0.318-0.013h0.212l0.080 0.013h10.826v-1.101h-7.748l-0.013-3.648-0.027-0.491 0.066-0.451 0.146-0.411 0.226-0.371 0.332-0.345 0.411-0.305 0.504-0.265 0.584-0.239 0.677-0.199 0.769-0.159 0.849-0.133 0.942-0.080 1.021-0.053 1.114-0.027 1.685 0.066zM10.627 2.946l-0.305 0.438-0.106 0.544 0.106 0.544 0.305 0.451 0.438 0.292 0.544 0.119 0.544-0.119 0.438-0.292 0.305-0.451 0.106-0.544-0.106-0.544-0.305-0.438-0.438-0.292-0.544-0.119-0.544 0.119zM27.993 8.186l0.371 0.080 0.425 0.159 0.464 0.239 0.478 0.358 0.478 0.464 0.464 0.623 0.425 0.783 0.371 0.968 0.279 1.167 0.186 1.38 0.066 1.632-0.080 1.632-0.212 1.38-0.318 1.141-0.425 0.942-0.478 0.756-0.531 0.597-0.557 0.438-0.557 0.318-0.531 0.212-0.478 0.119-0.425 0.066-0.318 0.027-0.212-0.013h-10.906v1.088h7.748l0.013 3.662 0.027 0.478-0.066 0.451-0.146 0.411-0.226 0.385-0.332 0.332-0.411 0.318-0.504 0.265-0.584 0.226-0.677 0.199-0.769 0.172-0.849 0.119-0.942 0.093-1.021 0.053-1.114 0.013-1.685-0.053-1.42-0.186-1.194-0.265-0.968-0.332-0.783-0.398-0.597-0.438-0.451-0.451-0.332-0.451-0.212-0.438-0.133-0.398-0.053-0.332-0.027-0.265 0.013-0.172v-7.085l0.066-0.849 0.172-0.716 0.279-0.61 0.345-0.504 0.398-0.425 0.438-0.318 0.464-0.265 0.464-0.186 0.438-0.133 0.398-0.080 0.345-0.053 0.279-0.027 0.172-0.013h7.748l0.915-0.066 0.783-0.186 0.663-0.279 0.544-0.371 0.438-0.425 0.358-0.464 0.265-0.478 0.199-0.478 0.133-0.464 0.093-0.425 0.053-0.371 0.027-0.279v-4.060h2.773l0.186 0.013zM19.409 27.092l-0.305 0.438-0.106 0.544 0.106 0.544 0.305 0.438 0.438 0.305 0.544 0.106 0.544-0.106 0.438-0.305 0.305-0.438 0.106-0.544-0.106-0.544-0.305-0.438-0.438-0.305-0.544-0.106-0.544 0.106z"></path>
</svg>

5
src/icons/terraform.svg Normal file
View File

@ -0,0 +1,5 @@
<!-- Generated by IcoMoon.io -->
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" width="32" height="32" viewBox="0 0 32 32">
<title>terraform</title>
<path d="M11.029 5.645l9.939 5.049v10.096l-9.939-5.049zM22.057 10.694v10.096l9.943-5.049v-10.096zM0 0.011v10.096l9.939 5.049v-10.096zM11.029 26.941l9.939 5.049v-10.092l-9.939-5.049z"></path>
</svg>

5
src/icons/tux.svg Normal file

File diff suppressed because one or more lines are too long

44
src/static/brand.svg Normal file
View File

@ -0,0 +1,44 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
id="Layer_1"
data-name="Layer 1"
viewBox="0 0 1066.5 1066.5"
version="1.1"
sodipodi:docname="brand.svg"
inkscape:export-filename="/home/rknet/rkau2905/Bilder/the Geeklab/new/brand.png"
inkscape:export-xdpi="270.04221"
inkscape:export-ydpi="270.04221"
inkscape:version="1.1 (c68e22c387, 2021-05-23)"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg">
<sodipodi:namedview
id="namedview11"
pagecolor="#2f333e"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageshadow="2"
inkscape:pageopacity="0"
inkscape:pagecheckerboard="0"
showgrid="false"
inkscape:zoom="0.62353493"
inkscape:cx="533.25"
inkscape:cy="532.44812"
inkscape:window-width="2560"
inkscape:window-height="1380"
inkscape:window-x="0"
inkscape:window-y="32"
inkscape:window-maximized="1"
inkscape:current-layer="Layer_1" />
<defs
id="defs4">
<style
id="style2">.cls-1{fill:#00101a;}.cls-2{fill:#fff;}</style>
</defs>
<path
id="path8"
class="cls-2"
d="M 532.3508,4.6997534 A 82.859095,82.859095 0 0 0 490.40895,15.816519 L 167.86364,203.60387 A 229.98135,229.98135 0 0 0 54.141557,402.24509 l 1.032151,288.3446 A 229.43295,229.43295 0 0 0 170.27201,888.21501 l 250.38855,143.39819 a 229.21691,229.21691 0 0 0 228.70031,-0.7466 L 898.63612,885.78716 a 229.49941,229.49941 0 0 0 113.72218,-198.5049 l -0.9835,-278.75659 A 136.45327,136.45327 0 0 0 806.33938,291.08391 L 446.30216,510.1313 444.64033,702.75485 847.60287,455.95611 848.45,687.81456 a 65.625866,65.625866 0 0 1 -32.10717,56.35309 L 566.90204,889.22767 a 65.310118,65.310118 0 0 1 -64.81157,0.21748 L 251.75062,746.04371 a 65.193788,65.193788 0 0 1 -32.7044,-55.98631 l -1.01269,-288.47769 a 65.17717,65.17717 0 0 1 32.35386,-56.20378 L 448.51253,229.79725 446.8507,404.12116 611.55413,307.15374 613.6152,87.311119 A 82.094651,82.094651 0 0 0 532.3508,4.6997534 Z"
style="stroke-width:1.66183" />
</svg>

3
src/static/custom.css Normal file
View File

@ -0,0 +1,3 @@
:root {
--code-max-height: 60rem;
}

46
src/static/favicon/favicon.svg Normal file

View File

@ -0,0 +1,46 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
id="Layer_1"
data-name="Layer 1"
viewBox="0 0 1066.5 1066.5"
version="1.1"
sodipodi:docname="favicon.svg"
inkscape:export-filename="/home/rknet/rkau2905/Bilder/the Geeklab/new/brand.png"
inkscape:export-xdpi="270.04221"
inkscape:export-ydpi="270.04221"
inkscape:version="1.2.2 (b0a8486541, 2022-12-01)"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg">
<sodipodi:namedview
id="namedview11"
pagecolor="#2f333e"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageshadow="2"
inkscape:pageopacity="0"
inkscape:pagecheckerboard="0"
showgrid="false"
inkscape:zoom="0.62353493"
inkscape:cx="337.59135"
inkscape:cy="533.25"
inkscape:window-width="2560"
inkscape:window-height="1371"
inkscape:window-x="0"
inkscape:window-y="32"
inkscape:window-maximized="1"
inkscape:current-layer="Layer_1"
inkscape:showpageshadow="2"
inkscape:deskcolor="#2f333e" />
<defs
id="defs4">
<style
id="style2">.cls-1{fill:#00101a;}.cls-2{fill:#fff;}</style>
</defs>
<path
id="path8"
class="cls-2"
d="M 532.3508,4.6997534 A 82.859095,82.859095 0 0 0 490.40895,15.816519 L 167.86364,203.60387 A 229.98135,229.98135 0 0 0 54.141557,402.24509 l 1.032151,288.3446 A 229.43295,229.43295 0 0 0 170.27201,888.21501 l 250.38855,143.39819 a 229.21691,229.21691 0 0 0 228.70031,-0.7466 L 898.63612,885.78716 a 229.49941,229.49941 0 0 0 113.72218,-198.5049 l -0.9835,-278.75659 A 136.45327,136.45327 0 0 0 806.33938,291.08391 L 446.30216,510.1313 444.64033,702.75485 847.60287,455.95611 848.45,687.81456 a 65.625866,65.625866 0 0 1 -32.10717,56.35309 L 566.90204,889.22767 a 65.310118,65.310118 0 0 1 -64.81157,0.21748 L 251.75062,746.04371 a 65.193788,65.193788 0 0 1 -32.7044,-55.98631 l -1.01269,-288.47769 a 65.17717,65.17717 0 0 1 32.35386,-56.20378 L 448.51253,229.79725 446.8507,404.12116 611.55413,307.15374 613.6152,87.311119 A 82.094651,82.094651 0 0 0 532.3508,4.6997534 Z"
style="stroke-width:1.66183;fill:#205375;fill-opacity:1" />
</svg>

146
src/static/socialmedia.svg Normal file

File diff suppressed because one or more lines are too long

BIN
src/static/socialmedia2.png Normal file

Binary file not shown.

Binary file not shown.

35
svgsprite.config.json Normal file
View File

@ -0,0 +1,35 @@
{
  "shape": {
    "id": {
      "generator": "geeklab_%s"
    },
    "dimension": {
      "maxWidth": 22,
      "maxHeight": 22,
      "attributes": false
    },
    "spacing": {
      "padding": 5,
      "box": "content"
    },
    "dest": "build/icons"
  },
  "svg": {
    "xmlDeclaration": false,
    "rootAttributes": {
      "class": "svg-sprite"
    }
  },
  "mode": {
    "defs": {
      "dest": "build/sprites/",
      "sprite": "geeklab.svg",
      "bust": false
    },
    "stack": {
      "dest": "build/img/",
      "sprite": "geeklab-stack.svg",
      "bust": false
    }
  }
}
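
This config backs the svg:sprite script in package.json: each icon is normalized to a 22x22 box with 5px padding, then compiled twice, once as a <defs> sprite (build/sprites/geeklab.svg, later copied into assets/ by webpack) and once as a stacked sprite (build/img/geeklab-stack.svg). The same output can be produced programmatically; a minimal sketch, assuming svg-sprite's documented SVGSpriter API:

// sprite-sketch.js: programmatic equivalent of the "svg:sprite" npm script.
const fs = require("fs")
const path = require("path")
const SVGSpriter = require("svg-sprite")

const config = JSON.parse(fs.readFileSync("svgsprite.config.json", "utf8"))
const spriter = new SVGSpriter(config)

for (const file of fs.readdirSync("src/icons").filter((f) => f.endsWith(".svg"))) {
  const abs = path.resolve("src/icons", file)
  spriter.add(abs, file, fs.readFileSync(abs, "utf8"))
}

spriter.compile((error, result) => {
  if (error) throw error
  // result is keyed by mode ("defs", "stack"); each resource knows its path.
  for (const mode of Object.values(result)) {
    for (const resource of Object.values(mode)) {
      fs.mkdirSync(path.dirname(resource.path), { recursive: true })
      fs.writeFileSync(resource.path, resource.contents)
    }
  }
})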

61
webpack.config.js Normal file
View File

@ -0,0 +1,61 @@
const path = require("path")
const yaml = require("js-yaml")
const fs = require("fs")
const WebpackFavicons = require("webpack-favicons")
const CopyPlugin = require("copy-webpack-plugin")

// Read Hugo's site config so the favicon metadata below can reuse the site
// title, subtitle and description.
let config
try {
  config = yaml.load(fs.readFileSync(path.join(__dirname, "config.yml"), "utf8"))
} catch (e) {
  console.log(e)
}

module.exports = {
  // Webpack needs a JS entry point even though this build only copies assets
  // and renders favicons; the empty src/dummy.js added above fills that role,
  // and its throwaway bundle is emitted to build/ instead of static/.
  entry: [path.resolve("src", "dummy.js")],
  output: {
    filename: "../build/dummy.js",
    path: path.join(__dirname, "static"),
    publicPath: "/",
    clean: true
  },
  plugins: [
    new CopyPlugin({
      patterns: [
        // Everything under src/static/ is copied verbatim into Hugo's static/.
        {
          from: "**/*",
          context: path.resolve(__dirname, "src", "static")
        },
        // Generated sprites are copied into assets/ for Hugo's asset pipeline.
        {
          from: "sprites/*.svg",
          to: path.resolve(__dirname, "assets"),
          context: path.resolve(__dirname, "build")
        },
        {
          from: "img/*.svg",
          context: path.resolve(__dirname, "build")
        }
      ]
    }),
    new WebpackFavicons({
      src: path.resolve("src", "static", "favicon", "favicon.svg"),
      path: "favicon/",
      appName: config.title,
      appShortName: config.title.concat(" - ", config.params.subtitle),
      appDescription: config.params.description,
      background: "#2f333e",
      theme_color: "#2f333e",
      icons: {
        android: { offset: 10 },
        appleIcon: { offset: 10 },
        appleStartup: { offset: 10 },
        favicons: true,
        windows: { offset: 10 },
        yandex: false,
        coast: false
      }
    })
  ]
}
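
Webpack acts purely as an asset pipeline here; nothing is really bundled. Note that the favicon plugin dereferences config.title and config.params directly, so the build breaks if config.yml lacks any of those fields. A minimal sketch of the expected shape, written as the object js-yaml would return (the values are placeholders, not taken from this PR):

// Fields of Hugo's config.yml consumed by webpack.config.js above.
const config = {
  title: "placeholder",         // appName; also the appShortName prefix
  params: {
    subtitle: "placeholder",    // appended to appShortName
    description: "placeholder"  // appDescription
  }
}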