pax_global_header 0000666 0000000 0000000 00000000064 13744741101 0014514 g ustar 00root root 0000000 0000000 52 comment=b99513ebadc5f39c77d109597804b76b79e5a2c0
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/ 0000775 0000000 0000000 00000000000 13744741101 0017404 5 ustar 00root root 0000000 0000000 blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/.envrc 0000775 0000000 0000000 00000000030 13744741101 0020516 0 ustar 00root root 0000000 0000000 source_up
layout pipenv
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/.gitignore 0000664 0000000 0000000 00000000635 13744741101 0021400 0 ustar 00root root 0000000 0000000 ~*
*~
*.sw[op]
*.py[cod]
.DS_Store
__pycache__/
.vagrant/
vendor/
Thumbs.db
*.retry
.svn/
.sass-cache/
*.log
*.out
*.so
node_modules/
.npm/
nbproject/
*.ipynb
.idea/
*.egg-info/
*.[ao]
.classpath
.cache/
bower_components/
*.class
*.[ewj]ar
secring.*
.*.kate-swp
.swp.*
.directory
.Trash-*
build/
_build/
dist/
.tox/
*.pdf
*.exe
*.dll
*.gz
*.tgz
*.tar
*.rar
*.zip
*.pid
*.lock
*.env
.bundle/
!Pipfile.lock
output/
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/.gitmodules 0000664 0000000 0000000 00000000165 13744741101 0021563 0 ustar 00root root 0000000 0000000 [submodule "pelican-mockingbird"]
path = pelican-mockingbird
url = https://www.shore.co.il/git/pelican-mockingbird
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/Pipfile 0000664 0000000 0000000 00000000312 13744741101 0020713 0 ustar 00root root 0000000 0000000 [[source]]
url = "https://pypi.python.org/simple"
verify_ssl = true
name = "pypi"
[packages]
pelican = "*"
"fabric3" = "*"
pelican-minification = "*"
[dev-packages]
[requires]
python_version = "3.6"
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/Pipfile.lock 0000664 0000000 0000000 00000036763 13744741101 0021665 0 ustar 00root root 0000000 0000000 {
"_meta": {
"hash": {
"sha256": "119959021872ee07af13ffcd6190f06f366833af53825a10fcbe23801b00c3a2"
},
"pipfile-spec": 6,
"requires": {
"python_version": "3.6"
},
"sources": [
{
"name": "pypi",
"url": "https://pypi.python.org/simple",
"verify_ssl": true
}
]
},
"default": {
"bcrypt": {
"hashes": [
"sha256:5b93c1726e50a93a033c36e5ca7fdcd29a5c7395af50a6892f5d9e7c6cfbfb29",
"sha256:63d4e3ff96188e5898779b6057878fecf3f11cfe6ec3b313ea09955d587ec7a7",
"sha256:81fec756feff5b6818ea7ab031205e1d323d8943d237303baca2c5f9c7846f34",
"sha256:a67fb841b35c28a59cebed05fbd3e80eea26e6d75851f0574a9273c80f3e9b55",
"sha256:c95d4cbebffafcdd28bd28bb4e25b31c50f6da605c81ffd9ad8a3d1b2ab7b1b6",
"sha256:cd1ea2ff3038509ea95f687256c46b79f5fc382ad0aa3664d200047546d511d1",
"sha256:cdcdcb3972027f83fe24a48b1e90ea4b584d35f1cc279d76de6fc4b13376239d"
],
"version": "==3.2.0"
},
"blinker": {
"hashes": [
"sha256:471aee25f3992bd325afa3772f1063dbdbbca947a041b8b89466dc00d606f8b6"
],
"version": "==1.4"
},
"cffi": {
"hashes": [
"sha256:005f2bfe11b6745d726dbb07ace4d53f057de66e336ff92d61b8c7e9c8f4777d",
"sha256:09e96138280241bd355cd585148dec04dbbedb4f46128f340d696eaafc82dd7b",
"sha256:0b1ad452cc824665ddc682400b62c9e4f5b64736a2ba99110712fdee5f2505c4",
"sha256:0ef488305fdce2580c8b2708f22d7785ae222d9825d3094ab073e22e93dfe51f",
"sha256:15f351bed09897fbda218e4db5a3d5c06328862f6198d4fb385f3e14e19decb3",
"sha256:22399ff4870fb4c7ef19fff6eeb20a8bbf15571913c181c78cb361024d574579",
"sha256:23e5d2040367322824605bc29ae8ee9175200b92cb5483ac7d466927a9b3d537",
"sha256:2791f68edc5749024b4722500e86303a10d342527e1e3bcac47f35fbd25b764e",
"sha256:2f9674623ca39c9ebe38afa3da402e9326c245f0f5ceff0623dccdac15023e05",
"sha256:3363e77a6176afb8823b6e06db78c46dbc4c7813b00a41300a4873b6ba63b171",
"sha256:33c6cdc071ba5cd6d96769c8969a0531be2d08c2628a0143a10a7dcffa9719ca",
"sha256:3b8eaf915ddc0709779889c472e553f0d3e8b7bdf62dab764c8921b09bf94522",
"sha256:3cb3e1b9ec43256c4e0f8d2837267a70b0e1ca8c4f456685508ae6106b1f504c",
"sha256:3eeeb0405fd145e714f7633a5173318bd88d8bbfc3dd0a5751f8c4f70ae629bc",
"sha256:44f60519595eaca110f248e5017363d751b12782a6f2bd6a7041cba275215f5d",
"sha256:4d7c26bfc1ea9f92084a1d75e11999e97b62d63128bcc90c3624d07813c52808",
"sha256:529c4ed2e10437c205f38f3691a68be66c39197d01062618c55f74294a4a4828",
"sha256:6642f15ad963b5092d65aed022d033c77763515fdc07095208f15d3563003869",
"sha256:85ba797e1de5b48aa5a8427b6ba62cf69607c18c5d4eb747604b7302f1ec382d",
"sha256:8f0f1e499e4000c4c347a124fa6a27d37608ced4fe9f7d45070563b7c4c370c9",
"sha256:a624fae282e81ad2e4871bdb767e2c914d0539708c0f078b5b355258293c98b0",
"sha256:b0358e6fefc74a16f745afa366acc89f979040e0cbc4eec55ab26ad1f6a9bfbc",
"sha256:bbd2f4dfee1079f76943767fce837ade3087b578aeb9f69aec7857d5bf25db15",
"sha256:bf39a9e19ce7298f1bd6a9758fa99707e9e5b1ebe5e90f2c3913a47bc548747c",
"sha256:c11579638288e53fc94ad60022ff1b67865363e730ee41ad5e6f0a17188b327a",
"sha256:c150eaa3dadbb2b5339675b88d4573c1be3cb6f2c33a6c83387e10cc0bf05bd3",
"sha256:c53af463f4a40de78c58b8b2710ade243c81cbca641e34debf3396a9640d6ec1",
"sha256:cb763ceceae04803adcc4e2d80d611ef201c73da32d8f2722e9d0ab0c7f10768",
"sha256:cc75f58cdaf043fe6a7a6c04b3b5a0e694c6a9e24050967747251fb80d7bce0d",
"sha256:d80998ed59176e8cba74028762fbd9b9153b9afc71ea118e63bbf5d4d0f9552b",
"sha256:de31b5164d44ef4943db155b3e8e17929707cac1e5bd2f363e67a56e3af4af6e",
"sha256:e66399cf0fc07de4dce4f588fc25bfe84a6d1285cc544e67987d22663393926d",
"sha256:f0620511387790860b249b9241c2f13c3a80e21a73e0b861a2df24e9d6f56730",
"sha256:f4eae045e6ab2bb54ca279733fe4eb85f1effda392666308250714e01907f394",
"sha256:f92cdecb618e5fa4658aeb97d5eb3d2f47aa94ac6477c6daf0f306c5a3b9e6b1",
"sha256:f92f789e4f9241cd262ad7a555ca2c648a98178a953af117ef7fad46aa1d5591"
],
"version": "==1.14.3"
},
"cryptography": {
"hashes": [
"sha256:21b47c59fcb1c36f1113f3709d37935368e34815ea1d7073862e92f810dc7499",
"sha256:451cdf60be4dafb6a3b78802006a020e6cd709c22d240f94f7a0696240a17154",
"sha256:4549b137d8cbe3c2eadfa56c0c858b78acbeff956bd461e40000b2164d9167c6",
"sha256:48ee615a779ffa749d7d50c291761dc921d93d7cf203dca2db663b4f193f0e49",
"sha256:559d622aef2a2dff98a892eef321433ba5bc55b2485220a8ca289c1ecc2bd54f",
"sha256:5d52c72449bb02dd45a773a203196e6d4fae34e158769c896012401f33064396",
"sha256:65beb15e7f9c16e15934569d29fb4def74ea1469d8781f6b3507ab896d6d8719",
"sha256:680da076cad81cdf5ffcac50c477b6790be81768d30f9da9e01960c4b18a66db",
"sha256:762bc5a0df03c51ee3f09c621e1cee64e3a079a2b5020de82f1613873d79ee70",
"sha256:89aceb31cd5f9fc2449fe8cf3810797ca52b65f1489002d58fe190bfb265c536",
"sha256:983c0c3de4cb9fcba68fd3f45ed846eb86a2a8b8d8bc5bb18364c4d00b3c61fe",
"sha256:99d4984aabd4c7182050bca76176ce2dbc9fa9748afe583a7865c12954d714ba",
"sha256:9d9fc6a16357965d282dd4ab6531013935425d0dc4950df2e0cf2a1b1ac1017d",
"sha256:a7597ffc67987b37b12e09c029bd1dc43965f75d328076ae85721b84046e9ca7",
"sha256:ab010e461bb6b444eaf7f8c813bb716be2d78ab786103f9608ffd37a4bd7d490",
"sha256:b12e715c10a13ca1bd27fbceed9adc8c5ff640f8e1f7ea76416352de703523c8",
"sha256:b2bded09c578d19e08bd2c5bb8fed7f103e089752c9cf7ca7ca7de522326e921",
"sha256:b372026ebf32fe2523159f27d9f0e9f485092e43b00a5adacf732192a70ba118",
"sha256:cb179acdd4ae1e4a5a160d80b87841b3d0e0be84af46c7bb2cd7ece57a39c4ba",
"sha256:e97a3b627e3cb63c415a16245d6cef2139cca18bb1183d1b9375a1c14e83f3b3",
"sha256:f0e099fc4cc697450c3dd4031791559692dd941a95254cb9aeded66a7aa8b9bc",
"sha256:f99317a0fa2e49917689b8cf977510addcfaaab769b3f899b9c481bbd76730c2"
],
"version": "==3.1.1"
},
"csscompressor": {
"hashes": [
"sha256:afa22badbcf3120a4f392e4d22f9fff485c044a1feda4a950ecc5eba9dd31a05"
],
"version": "==0.9.5"
},
"docutils": {
"hashes": [
"sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af",
"sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc"
],
"version": "==0.16"
},
"fabric3": {
"hashes": [
"sha256:647e485ec83f30b587862f92374d6affc217f3d79819d1d7f512e42e7ae51e81",
"sha256:7c5a5f2eb3079eb6bd2a69931f1ca298844c730ce3fdc68111db16e8857a0408"
],
"index": "pypi",
"version": "==1.14.post1"
},
"feedgenerator": {
"hashes": [
"sha256:a28a5add781509390d1a6a52d017829853ee4bef1d2e7b4d5da0e9d1b395ce54",
"sha256:a81d240a2c74a2ae80c393cfe374aff1a19ed3fe41a488eacc66edbe46d3f422"
],
"version": "==1.9.1"
},
"htmlmin": {
"hashes": [
"sha256:50c1ef4630374a5d723900096a961cff426dff46b48f34d194a81bbe14eca178"
],
"version": "==0.1.12"
},
"jinja2": {
"hashes": [
"sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0",
"sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035"
],
"version": "==2.11.2"
},
"markupsafe": {
"hashes": [
"sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473",
"sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161",
"sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235",
"sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5",
"sha256:13d3144e1e340870b25e7b10b98d779608c02016d5184cfb9927a9f10c689f42",
"sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff",
"sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b",
"sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1",
"sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e",
"sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183",
"sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66",
"sha256:596510de112c685489095da617b5bcbbac7dd6384aeebeda4df6025d0256a81b",
"sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1",
"sha256:6788b695d50a51edb699cb55e35487e430fa21f1ed838122d722e0ff0ac5ba15",
"sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1",
"sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e",
"sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b",
"sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905",
"sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735",
"sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d",
"sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e",
"sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d",
"sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c",
"sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21",
"sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2",
"sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5",
"sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b",
"sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6",
"sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f",
"sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f",
"sha256:cdb132fc825c38e1aeec2c8aa9338310d29d337bebbd7baa06889d09a60a1fa2",
"sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7",
"sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be"
],
"version": "==1.1.1"
},
"paramiko": {
"hashes": [
"sha256:4f3e316fef2ac628b05097a637af35685183111d4bc1b5979bd397c2ab7b5898",
"sha256:7f36f4ba2c0d81d219f4595e35f70d56cc94f9ac40a6acdf51d6ca210ce65035"
],
"version": "==2.7.2"
},
"pelican": {
"hashes": [
"sha256:2c5c721bc95caa406673d74a5b906f42289930dabee598b41930a18cf98742a5",
"sha256:d08119a3ed3eb7d3f2d0a9a5b929be648ad3c84d018b19517ecc608383b824e6"
],
"index": "pypi",
"version": "==4.5.0"
},
"pelican-minification": {
"hashes": [
"sha256:8c3b1c312171ef86164a0f12d86301d0816a73c090cff7fb3d863481da813228"
],
"index": "pypi",
"version": "==0.1.1"
},
"pycparser": {
"hashes": [
"sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0",
"sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705"
],
"version": "==2.20"
},
"pygments": {
"hashes": [
"sha256:307543fe65c0947b126e83dd5a61bd8acbd84abec11f43caebaf5534cbc17998",
"sha256:926c3f319eda178d1bd90851e4317e6d8cdb5e292a3386aac9bd75eca29cf9c7"
],
"version": "==2.7.1"
},
"pynacl": {
"hashes": [
"sha256:06cbb4d9b2c4bd3c8dc0d267416aaed79906e7b33f114ddbf0911969794b1cc4",
"sha256:11335f09060af52c97137d4ac54285bcb7df0cef29014a1a4efe64ac065434c4",
"sha256:2fe0fc5a2480361dcaf4e6e7cea00e078fcda07ba45f811b167e3f99e8cff574",
"sha256:30f9b96db44e09b3304f9ea95079b1b7316b2b4f3744fe3aaecccd95d547063d",
"sha256:4e10569f8cbed81cb7526ae137049759d2a8d57726d52c1a000a3ce366779634",
"sha256:511d269ee845037b95c9781aa702f90ccc36036f95d0f31373a6a79bd8242e25",
"sha256:537a7ccbea22905a0ab36ea58577b39d1fa9b1884869d173b5cf111f006f689f",
"sha256:54e9a2c849c742006516ad56a88f5c74bf2ce92c9f67435187c3c5953b346505",
"sha256:757250ddb3bff1eecd7e41e65f7f833a8405fede0194319f87899690624f2122",
"sha256:7757ae33dae81c300487591c68790dfb5145c7d03324000433d9a2c141f82af7",
"sha256:7c6092102219f59ff29788860ccb021e80fffd953920c4a8653889c029b2d420",
"sha256:8122ba5f2a2169ca5da936b2e5a511740ffb73979381b4229d9188f6dcb22f1f",
"sha256:9c4a7ea4fb81536c1b1f5cc44d54a296f96ae78c1ebd2311bd0b60be45a48d96",
"sha256:c914f78da4953b33d4685e3cdc7ce63401247a21425c16a39760e282075ac4a6",
"sha256:cd401ccbc2a249a47a3a1724c2918fcd04be1f7b54eb2a5a71ff915db0ac51c6",
"sha256:d452a6746f0a7e11121e64625109bc4468fc3100452817001dbe018bb8b08514",
"sha256:ea6841bc3a76fa4942ce00f3bda7d436fda21e2d91602b9e21b7ca9ecab8f3ff",
"sha256:f8851ab9041756003119368c1e6cd0b9c631f46d686b3904b18c0139f4419f80"
],
"version": "==1.4.0"
},
"python-dateutil": {
"hashes": [
"sha256:73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c",
"sha256:75bb3f31ea686f1197762692a9ee6a7550b59fc6ca3a1f4b5d7e32fb98e2da2a"
],
"version": "==2.8.1"
},
"pytz": {
"hashes": [
"sha256:a494d53b6d39c3c6e44c3bec237336e14305e4f29bbf800b599253057fbb79ed",
"sha256:c35965d010ce31b23eeb663ed3cc8c906275d6be1a34393a1d73a41febf4a048"
],
"version": "==2020.1"
},
"six": {
"hashes": [
"sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259",
"sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"
],
"version": "==1.15.0"
},
"unidecode": {
"hashes": [
"sha256:1d7a042116536098d05d599ef2b8616759f02985c85b4fef50c78a5aaf10822a",
"sha256:2b6aab710c2a1647e928e36d69c21e76b453cd455f4e2621000e54b2a9b8cce8"
],
"version": "==1.1.1"
}
},
"develop": {}
}
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/README.md 0000664 0000000 0000000 00000002434 13744741101 0020666 0 ustar 00root root 0000000 0000000 # Blog
> My (Nimrod Adar) blog.
## Requirements
- Python 3.6
- [Pipenv](https://pipenv.org)
- [Docker](https://www.docker.com/) (Only needed for running the lighthouse and
sitespeed tests)
- [direnv](http://direnv.net/) (Just for ease of use)
## Usage
All tasks are done via `fab` using `pipenv`
```
pipenv run $ fab -l
Available commands:
build Build local version of site
clean Remove generated files
dev Auto-regenerate files and serve at http://localhost:8080/
lighthouse Run Chrome's Lighthouse report against the local dev server
preview Build production version of site
publish Publish to production via rsync
regenerate Automatically regenerate site upon file modification
serve Serve site at http://localhost:8080/
sitespeed Run sitespeed test against the local dev server
```
## License
This software is licensed under the [Creative Commons - Attribution 4.0
International license](https://creativecommons.org/licenses/by/4.0/).
## Author Information
Nimrod Adar, [contact me](mailto:nimrod@shore.co.il) or visit my [website](
https://www.shore.co.il/). Patches are welcome via [`git send-email`](
http://git-scm.com/book/en/v2/Git-Commands-Email). The repository is located
at: .
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/ 0000775 0000000 0000000 00000000000 13744741101 0021056 5 ustar 00root root 0000000 0000000 blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/ansible-example-role.rst 0000664 0000000 0000000 00000012245 13744741101 0025621 0 ustar 00root root 0000000 0000000 An example Ansible role
#######################
:date: 2016-05-19
:summary: An example Ansible role.
A few weeks ago I started a new job and a lot of time was spent on refactoring
as well as adding to an existing Ansible automation code base. For me this was a
chance to work more with `Molecule `_ for
testing. Molecule is a infrastructure-as-code testing tool that is
inspired by Test-kitchen and the tests can be written using `Testinfra
`_ which in turn is using `pytest
`_. The reasons for me to choose this combination is that
the tools are written in Python and that they're focused on Ansible. However I
quickly grew tired of copying files from role to role or making the same
changes to files again and again. So in that spirit I created a new Git repo
with an empty Ansible role (no tasks, variables, handlers etc.) but has all of
my changes and tweaks already applied and working tests out of the box.
Usage
-----
To work on the role install VirtualBox and Vagrant (I use the versions in
Debian's repos) and from PyPI Ansible, Molecule and Testinfra. Now, fork the
repo. As you can see there are already README and LICENSE files. If you ever
ran :code:`ansible-galaxy init` or :code:`molecule init` you'll notice that
indeed the repo was created with those tools.
Dependencies
------------
There's an example dependency present in :code:`meta/main.yml` but instead of
the declaring the dependencies in :code:`meta/main.yml` and the sources of the
dependencies in :code:`requirements.yml` which leads to repeating yourself, the
example shows how to declare the source of the dependent role directly in
:code:`meta/main.yml` (which I haven't seen mentioned clearly in the Ansible
documentation. For repositories with playbooks I'd still add a
:code:`requirements.yml` file since there's no meta directory. Pulling the
dependencies took some thought and what I came up with is:
.. code:: shell
ansible-galaxy install git+file://$(pwd),$(git rev-parse --abbrev-ref HEAD)
This is a workaround for installing the dependencies as it actually uses
ansible-galaxy to install the git repo of the role and the dependencies as well.
Testing
-------
First, I configured `pre-commit `_ hooks that check,
among other things, the validity of the YAML files and the does a syntax check
of the Ansible playbook.
As for Molecule, the configuration of the test environment is mainly under
:code:`molecule.yml`. That is were you'd go to change the Vagrant box to test.
You can add multiple boxes and specify which box to test like so
:code:`molecule test --platform `.
Also worth mentioning is the Ansible configuration in :code:`ansible.cfg`.
This is some what of a workaround as well because many of the options can be
configured in :code:`molecule.yml` which is used to generate its own
:code:`ansible.cfg`. However since Testinfra runs the tests over Ansible and
Molecule doesn't pass the configuration along to it, the configuration isn't
honored during testing. This caused me some grief as tests were constantly
failing because Ansible would the host SSH key and fail as it was not known.
The way I did is create an :code:`ansible.cfg` at the root of the repo where
Testinfra would look and passed that as the template to Molecule.
The playbook that is run in at :code:`tests/playbook.yml` and the tests are
under :code:`tests/` as well. There's an simple example test but the Testinfra
documentation quite good. Just remember to that both the filename and function
name should start with :code:`test_` and you won't have tests that aren't found.
A word on CI
------------
Now you have all of the different pieces and workflow to run complete tests on
roles the next obvious step is setting up a CI pipeline. In my tests and as I
know the various CI services (I tried Travis-CI and CircleCI) disable the
option to run any hypervisor. For me it's a deal breaker because I depend on
VirtualBox (I need to test on different OSes, not just Linux). If LXC serves
your needs than you should be able to run Vagrant with the LXC provider and
therefore Molecule. For me it's a deal breaker.
A final word on boiler-plate
----------------------------
In a previous post I mentioned that I have several repositories that have the
same boiler-plate and how I plan on dealing with that. Now, this is the first
attempt at this. The idea is having a base repo that I clone, add another remote
and voilà, a new project with the scaffolding already there. For bonus points, I
can update the base repo and pull those changes in all projects. Here's how I do
it:
.. code:: shell
git clone https://www.shore.co.il/git/ansible-role-example ansible-role-name
cd ansible-role-name
for file in $(git grep -l ansible-role-example); do sed -i 's/ansible-role-example/ansible-role-name/g' $file; done
git add .
git commit -m"- Renamed ansible-role-example to ansible-role-name."
git remote rename origin ansible-role-example
git remote add origin git@example.com/path/to/repo
git push -u origin master
And in case I update the ansible-role-example repo than I pull the updates by
running :code:`git pull ansible-role-example master`.
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/ansible-modules.rst 0000664 0000000 0000000 00000004353 13744741101 0024700 0 ustar 00root root 0000000 0000000 Sharing Ansible modules
#######################
:date: 2015-11-15
:summary: How to share Ansible modules
With Ansible you're expected to share roles with the Ansible Galaxy tool (either
through the `Ansible Galaxy hub `_ or just using
straight git repositories). This works well enough (and personally I am using
:code:`ansible-galaxy init` to start each new role, even those that I'm not going to
share with the community). However, for sharing modules there is no such easy
solution, or is it?
Sharing with git submodule
--------------------------
I'd like to start by saying that git submodule is the poor man's package
manager and it's lack of popularity is (somewhat) justified. However, this is a
nice demonstration of a case where there is no package manager available and of
using git submodule instead. Also, I've only been able to use this technique for
modules written in Python, which is nice considering the lack of boiler-plate
that Ansible provides and that Python is my personal preference.
The whole story is really quite simple, create a separate git repository with
the modules in it. You can put them in sub-directories and as a far as I know,
there's no restriction on the hierarchy depth. In your playbook directory create
a :code:`library` directory (the Ansible default, so you can change this in
:code:`ansible.cfg`) and create an empty :code:`__init__.py` file inside that
directory. Add a git submodule inside that directory and you're done. Let's see an
example
.. code:: shell
git init ansible-modules
cd ansible-modules
# Write great module
git commit -a
git push
cd /path/to/your/ansible/playbook/repository
mkdir library
touch library/__init__.py
git submodule add host:/path/to/ansible-modules library/my_modules
git add .gitmodules
git commit
git push
Really, not that complicated. The only magic (undocumented) bit is creating a
:code:`__init__.py` file inside the :code:`library` directory, which is a
shame that the Ansible documentation doesn't cover that. If you want to see a
real-life example, checkout my `ansible-playbooks
`_ and `ansible-modules
`_ git repos.
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/ansible-python.rst 0000664 0000000 0000000 00000005467 13744741101 0024560 0 ustar 00root root 0000000 0000000 Using Ansible as a Python module
================================
:date: 2015-01-01
:summary: Using Ansible as a Python module when playbooks are not enough.
At my current employer we have several servers in production with various
providers, some of them with multiple IP addresses. When configuring the
firewall to allow traffic from other servers I reached for Ansible. The
obvious solution was to use a nested loop, something like this:
.. code:: yaml
- name: Allow other servers
ufw:
rule: allow
from_ip: '{{ item[1] }}'
with_nested:
- all_hosts
- '{{ item.ansible_all_ipv4_addresses }}'
However, this syntax is invalid (and other variations I tried). Using
:code:`include` with :code:`with_items` is deprecated and I didn't manage
to get it to work with registering variables as well. What I had left was
programmatically generating a playbook, but investigating further I found that
Ansible can be imported as a Python module.
Incorporating Ansible in Python
-------------------------------
To retrieve all of the ip addresses I'd ran the setup module to gather the
information
.. code:: python
from ansible.runner import Runner
struct = Runner (module_name='setup', pattern='all_hosts').run()
Now we have a complex data structure that is the output of Ansible's fact
gathering module. Running it in the interpreter and examining the structure is
not hard at all and that is how I managed to write the following code to extract
a list of all of our server's ip addresses.
.. code:: python
ipaddresses = []
for host in struct['contacted']:
for ip in struct['contacted'][host]['ansible_facts']['ansible_all_ipv4_addresses']:
ipaddresses.append (ip)
Putting that information to good use
------------------------------------
Now that we have a list of the ip addresses, we can start running Ansible
commands right from with Python (just like we did) or build a playbook by
outputting a YAML file. I chose the latter.
.. code:: python
from yaml import safe_dump
doc = {'all_ipv4': ipaddresses}
print (safe_dump (doc), file='vars.yml')
This will create a vars.yml file with the all_ipv4 variable already defined
there to be imported to any playbook and run. For example:
.. code:: yaml
---
- hosts: all_hosts
vars_files:
- vars.yml
tasks:
- name: Allow other servers
with_items: all_ipv4
ufw:
rule: allow
from_ip: '{{ item }}'
With this much little code we were able to query all of our hosts, extract the
needed information and output it back to Ansible for further use. I see this as
a product of the good decisions the Ansible developers choose early on (YAML,
Python, SSH). As always, for any feedback you may have, `email me `_.
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/aws_change_own_password.rst 0000664 0000000 0000000 00000003676 13744741101 0026530 0 ustar 00root root 0000000 0000000 Self service AWS IAM policy
###########################
:date: 2016-09-01
:summary: AWS IAM policy to allow users to change their own password and manage
their own keys.
A common practice for me when a new member joins the team or when someone
forgets his/ her AWS account password is to change the account password myself,
send the new password over an insecure channel (email, Slack) but force the
account to change the password on first login. Also, I prefer to have users
manage their own keys to AWS themselves. But without the correct IAM policy
users aren't able to perform either action. Here's an IAM to allow both:
.. code:: json
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"iam:GetAccountPasswordPolicy",
"iam:ListAccount*",
"iam:GetAccountSummary",
"iam:GetAccountPasswordPolicy",
"iam:ListUsers"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"iam:ChangePassword",
"iam:*LoginProfile",
"iam:*AccessKey*",
"iam:*SSHPublicKey*"
],
"Resource": "arn:aws:iam:::user/${aws:username}"
}
]
}
If you want a little script with the AWS CLI, here's one for you:
.. code:: shell
tempfile=$(mktemp)
accountid="$(aws ec2 describe-security-groups \
--group-names 'Default' \
--query 'SecurityGroups[0].OwnerId' \
--output text)"
curl https://www.shore.co.il/blog/static/policy.json | sed "s//$accountid/" > $tempfile
aws iam create-policy \
--policy-name change-own-password \
--policy-document file://$tempfile
rm $tempfile
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/bundle_certs.rst 0000664 0000000 0000000 00000005056 13744741101 0024267 0 ustar 00root root 0000000 0000000 bundle_certs
############
:date: 2016-03-02
:summary: Announce a new tool, bundle_certs
Like I said in a previous blog post, I rarely blog but I run :code:`git init
project-name` pretty regularly. So here's a new such repo, `bundle_certs
`_. A simple shell script for
bundling (in the correct order) SSL certificates.
How I start new projects
------------------------
This little tool, along with `ssl-ca `_
and `ssh-ca `_ have some commonality in
how I use them and this seems like a good opportunity to share. I keep my rc
files (like :code:`.vimrc`) in the
`rcfiles _ repo. However I don't install
them as mentioned in the documentation. Instead I add them as Git sub modules
and now I can be reasonably sure that when I clone the rcfiles repository, the
aliases and sourced files mentioned in :code:`.bashrc` are present. Here's how:
.. code:: shell
ssh cgit 'git init --bare /srv/git/REPONAME'
git submodule add -b master -f https://www.shore.co.il/cgit/REPONAME
First I create the remote repository (most of you would probably use Github but
I prefer self hosting). Then I add it as a Git submodule.
Repository boiler-plate
-----------------------
Truth be told, there are more line of tests, documentation, license, etc. than
there is actual code in these repositories. It happened to a few times that I
added something nice to a repository that I wanted to have in all (or most) of
my other repositories and in new repositories going forward.
One solution I thought of is creating a base template repository that all
others are forked from. The upside is if I change something in the base
repository I can fetch it in all other repositories. The downside is not all
repositories are the same (different license, programming language, pre-commit
and git hooks).
Another option I know of are tools that manage a specific aspect of the repo,
for example the license, or :code:`.gitignore`.
A third option is using a project management tool like `Cargo
`_ for Rust or `Leiningen `_
for Clojure. But not all aspects or languages have such tools.
The fourth option I'm thinking of is using a scaffolding tool, mainly `Yeoman
`_ as it seems to the most popular one but its focus is on
JS and webapps.
As of now, my plan is to try and maintain a base repo for certain project types
and see how it goes (Yeoman would just take more time to get started with).
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/docker_entrypoint.rst 0000664 0000000 0000000 00000005611 13744741101 0025355 0 ustar 00root root 0000000 0000000 Expanding variables and shell expressions in parameters to Docker entrypoint
############################################################################
:date: 2016-06-13
:summary: Expanding variables and shell expressions in parameters to Docker entrypoint
A known best practice when creating Docker images is when you need to run
commands in runtime before starting the actual application/ daemon is to create
an entrypoint script and pass the command as parameters in the :code:`CMD`
instruction. Another best practice is to exec the final command so it would be
PID 1 and receive the signals passed to it. Let's create a small example. Here's
the :code:`Dockerfile`:
.. code::
FROM alpine
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]
ENV var value
CMD ["echo", "$var"]
And the :code:`entrypoint.sh` script:
.. code:: shell
#!/bin/sh
set -eu
# Perform any needed tasks here.
exec $@
Now let's build and run this container:
.. code:: shell
$ docker build --tag entrypoint .
Sending build context to Docker daemon 28.67 kB
Step 0 : FROM alpine
---> 5f05d2ba9e65
Step 1 : COPY entrypoint.sh /entrypoint.sh
---> f59f4d7f3546
Removing intermediate container 27ca546c6b6c
Step 2 : ENTRYPOINT /entrypoint.sh
---> Running in 98c65b63948a
---> 1de45b33021b
Removing intermediate container 98c65b63948a
Step 3 : ENV var value
---> Running in 133a8781f0ac
---> bba451334fb2
Removing intermediate container 133a8781f0ac
Step 4 : CMD echo $var
---> Running in e8436c6c3202
---> a49d9b335b74
Removing intermediate container e8436c6c3202
Successfully built a49d9b335b74
$ docker run entrypoint
$var
As we can see the variable :code:`var` wasn't expanded to it's content. After a
bit of head scratching, The following simple change was made to the entrypoint
script.
.. code:: shell
#!/bin/sh
set -eu
# Perform any needed tasks here.
eval "exec $@"
The change is to first evaluate the expression (expanding any variable and
expression found), then :code:`exec` it. The outcome is what you'd expect.
.. code::
$ docker build --tag entrypoint .
Sending build context to Docker daemon 28.67 kB
Step 0 : FROM alpine
---> 5f05d2ba9e65
Step 1 : COPY entrypoint.sh /entrypoint.sh
---> b874d862999d
Removing intermediate container fb6483ff00e3
Step 2 : ENTRYPOINT /entrypoint.sh
---> Running in 82adf0b2c4c7
---> 6674f336c5e1
Removing intermediate container 82adf0b2c4c7
Step 3 : ENV var value
---> Running in 599f3f98c11d
---> 980f1e1e1ad5
Removing intermediate container 599f3f98c11d
Step 4 : CMD echo $var
---> Running in e29f1948480a
---> e27fd79143f8
Removing intermediate container e29f1948480a
Successfully built e27fd79143f8
$ docker run entrypoint
value
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/docker_socket_over_ssh.rst 0000664 0000000 0000000 00000002353 13744741101 0026342 0 ustar 00root root 0000000 0000000 Docker socket over SSH
######################
:date: 2018-01-09
:summary: Docker socket over SSH
Yesterday I described how to connect to a remote :code:`dockerd` over TCP. I
didn't touch security considerations at all (firewall, TLS certificate). This
because, for my use, I prefer a different method, forwarding the Unix socket
over SSH. Here's how.
First, you need OpenSSH version 6.7 or later (both client and server). Also,
the login user on the remote instance must have permissions to access the Docker
socket (in other words, be a member of the :code:`docker` group).
Here's how to forward the remote socket:
.. code:: shell
ssh -fNTo ExitOnForwardFailure=yes -o ServerAliveInterval=30 -L $HOME/.ssh/docker.sock:/var/run/docker.sock host
export DOCKER_HOST=$HOME/.ssh/docker.sock
And to close the connection and return to the local :code:`dockerd` kill the
:code:`ssh` process that's running in the background, :code:`rm` the docker
socket under :code:`$HOME/.ssh` and unset :code:`DOCKER_HOST`.
The reason I prefer this method is that it's easier to setup for ad-hoc tasks
and arguably more secure since you not only authenticate the user and host with
SSH, but you limit access to only those that are part of the :code:`docker`
group.
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/docker_tcp_socket.rst 0000664 0000000 0000000 00000001622 13744741101 0025276 0 ustar 00root root 0000000 0000000 Bind dockerd to a TCP port
##########################
:date: 2018-01-08
:summary: Bind dockerd to a TCP port
On a modern system (one running Systemd) when installing Docker, the
:code:`dockerd` daemon is run using Systemd's socket activation. By default the
socket is :code:`/var/run/docker.sock`. If you want to connect to a remote
machine over TCP, the obvious thing to do is to create
:code:`/etc/docker/daemon.json` and set the :code:`hosts` list there. But that
will conflict with the command line flags for socket activation. The correct way
is to override Systemd's socket activation config. Here's how (all commands are as
:code:`root`):
.. code:: shell
mkdir -p /etc/systemd/system/docker.socket.d
echo '[Socket]' > /etc/systemd/system/docker.socket.d/tcp.conf
echo 'ListenStream=2375' >> /etc/systemd/system/docker.socket.d/tcp.conf
systemctl daemon-reload
systemctl restart docker
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/docker_uid.rst 0000664 0000000 0000000 00000005347 13744741101 0023731 0 ustar 00root root 0000000 0000000 Building inside a Docker container with the correct user
########################################################
:date: 2017-11-26
:summary: Building inside a Docker container with the correct user
Lately I've been using Docker container as clean, easily portable and easily
removable build environments. In those cases the image contains the needed build
tools and the project is mounted to a volume inside the container. The artifacts
are then built inside the container but are placed inside the volume. However
a small problem arises, the artifacts (and whatever other files are created,
like cache) are owned by the default user, :code:`root`, making editing or
removing said files less straightforward.
The trivial solution
--------------------
The trivial solution is to run the container with the correct user id, like so
.. code:: shell
uid="$(id -u)"
gid="$(id -g)"
docker run -v "$PWD:/volume" --user "$uid:$gid" buildimage make
I personally find it tiresome after the 3rd time I had to :code:`sudo chown` the
project because I forgot to specify the uid and gid and it's a (low) barrier
of entry for new users.
A better solution
-----------------
The solution I've come up with is this small script that sets the uid and gid
values to those of the owner and group for the volume and then execute the
commands.
.. code:: shell
#!/bin/sh
set -eu
[ "$(id -u)" = "0" ] || { echo "Not running as root, continuing as the current user."; eval exec "$@"; }
command -v stat > /dev/null || { echo "Can't find stat, exiting."; exit 1; }
command -v gosu > /dev/null || { echo "Can't find gosu, exiting."; exit 1; }
uid="$(stat . -c '%u')"
gid="$(stat . -c '%g')"
eval exec gosu "$uid:$gid" "$@"
The script is also available for `download
`_. The only dependency is
`gosu `_. You can download and check it in to
your VCS and incorporate it into your Dockerfile, or download it via the
:code:`ADD` directive, like so:
.. code:: shell
FROM buildpack-deps
RUN curl -fsSL https://github.com/tianon/gosu/releases/download/1.10/gosu-amd64 -o gosu-amd64 && \
install -o root -g root -m 755 gosu-amd64 /usr/local/bin/gosu && \
rm gosu-amd64 && \
curl -fsSL https://www.shore.co.il/blog/static/runas -o runas && \
install -o root -g root -m 755 runas /entrypoint && \
rm runas
ENTRYPOINT [ "/entrypoint" ]
VOLUME /volume
WORKDIR /volume
ENV HOME /volume
Setting the home directory to the mounted volume will result in some files (like
the package manager's cache) being created there, which you may or may not want. And
then finally, to build run
.. code:: shell
docker run -v "$PWD:/volume" buildimage make
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/git_serve.rst 0000664 0000000 0000000 00000002646 13744741101 0023607 0 ustar 00root root 0000000 0000000 Ad-hoc serving of git repositories
##################################
:date: 2016-08-16
:summary: Ad-hoc serving of git repositories
On some occasion you want to serve your git repo from your local copy (perhaps
your git repository is quite large and your internet connection is slow or your
build process would benefit from pulling from an intermediary without
authentication). Here are 2 ways to serve your git repository without any
configuration or software installation. Both ways serve a single repository
without authentication or encryption but read-only (no push).
Using the git protocol
----------------------
The git executable is itself a git server using the native git protocol. Inside
the root of the repository run the following command
.. code:: shell
git daemon --reuseaddr --verbose --base-path=. --export-all ./.git
And on the client you can clone by running
.. code:: shell
git clone git://servername/ reponame
Using the HTTP protocol
-----------------------
This way serves the repo over HTTP using Python 2's SimpleHTTPServer. Run the
following in the root of the git repo
.. code:: shell
git update-server-info
cd .git
python -m SimpleHTTPServer
And on the client clone by running
.. code:: shell
git clone http://servername:8000/ reponame
Final words
-----------
I've added both ways as git aliases in my `rcfiles repo
`_.
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/pages/ 0000775 0000000 0000000 00000000000 13744741101 0022155 5 ustar 00root root 0000000 0000000 blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/pages/about.rst 0000664 0000000 0000000 00000002570 13744741101 0024025 0 ustar 00root root 0000000 0000000 About me
########
:date: 2014-04-19
:summary: All about Nimrod.
:slug: about-me
Hi.
I'm Nimrod Adar (נמרוד אדר), a freelance DevOps engineer, Linux and BSD sysadmin
and a backend Python developer residing in Zikhron Ya'akov, Israel.
This is my blog about what I do and thus it serves 2 purposes. The 1st is to be
my personal journal to document the how or why of some problem solving. The 2nd
is more outward facing, for it to be a (by no way complete) resource for others
and for me to publish whenever I need to. The other major part of the site is my
code repo (click on the "Code" link at the top of the page), where I publish my
opensource software.
You can contact me at `nimrod@shore.co.il `_ or at
`052-8713696 `_ (`+972-52-871-3696 `_ outside
of Israel).
Résumé
------
My résumé is available online in the following formats:
- `HTML (single file) `_
- `OpenDocument text `_
- `PDF `_
- `Microsoft Word docx `_
There's also a `repository `_ that
contains the original reStructuredText version and publishing pipeline.
Last thing, my public `GPG public key <{static}/static/nimrod.asc>`_
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/pages/spam.rst 0000664 0000000 0000000 00000001521 13744741101 0023646 0 ustar 00root root 0000000 0000000 Spam Nimrod
###########
:date: 2020-10-23
:summary: Spam Nimrod
:slug: spam
:status: hidden
.. raw:: html
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/pelican.rst 0000664 0000000 0000000 00000013157 13744741101 0023232 0 ustar 00root root 0000000 0000000 Blogging with Pelican
#####################
:date: 2014-04-19
:summary: How I blog with Pelican
What is Pelican?
----------------
`Pelican `_ is a static site generator.
It's written in Python, focusing on blogs, using reStructuredText, Jinja2 and Fabric (but you can use Markdown and makefiles and has provisions for normal web pages as well).
It's a pythonic tool that's easy to use and was a breeze to setup.
Installing Pelican
------------------
As Pelican is a static blog/ website generator, all we're doing is in your workstation. All you need to have server-wise is a bog-standard web server (like Apache or Nginx). Everything else is done on your local machine. I installed Pelican from Debian (it's currently available in testing)
.. code:: shell
apt-get install python-pelican fabric
Alternatively, you can use pip
.. code:: shell
pip install pelican fabric
Creating a blog
---------------
Create a blog directory and an empty blog ::
$ mkdir blog
$ cd blog
$ pelican-quickstart
Welcome to pelican-quickstart v3.4.0.
This script will help you create a new Pelican-based website.
Please answer the following questions so this script can generate the files
needed by Pelican.
> Where do you want to create your new web site? [.]
> What will be the title of this web site? My Blog
> Who will be the author of this web site?
> What will be the default language of this web site? [en]
> Do you want to specify a URL prefix? e.g., http://example.com (Y/n)
> What is your URL prefix? (see above example; no trailing slash)
> Do you want to enable article pagination? (Y/n)
> How many articles per page do you want? [10]
> Do you want to generate a Fabfile/Makefile to automate generation and publishing? (Y/n)
> Do you want an auto-reload & simpleHTTP script to assist with theme and site development? (Y/n)
> Do you want to upload your website using FTP? (y/N)
> Do you want to upload your website using SSH? (y/N) y
> What is the hostname of your SSH server? [localhost]
> What is the port of your SSH server? [22]
> What is your username on that server? [root]
> Where do you want to put your web site on that server? [/var/www]
> Do you want to upload your website using Dropbox? (y/N)
> Do you want to upload your website using S3? (y/N)
> Do you want to upload your website using Rackspace Cloud Files? (y/N)
> Do you want to upload your website using GitHub Pages? (y/N)
Done. Your new project is available at blog
Since Pelican uses OpenSSH, you can use servers defined in your SSH preferences. Now, lets configure the blog to our liking.
Configuration
-------------
In the blog directory there are the 2 configuration files: pelicanconf.py for configuring Pelican and publishconf.py for configuration that are only for publishing using Make or Fabric. Pelican also creates standard Makefile and fabfile.py for you. I've made the following modifications to pelicanconf.py: ::
TIMEZONE = 'Asia/Jerusalem'
PATH = "content"
DIRECT_TEMPLATES = ('index', 'archives')
DISPLAY_CATEGORIES_ON_MENU = False
DISPLAY_PAGES_ON_MENU = True
TAGS_SAVE_AS = ''
TAG_SAVE_AS = ''
STATIC_PATH = ['static']
And to publishconf.py: ::
CATEGORY_FEED_ATOM = None
I've set the timezone to mine (so that the time of published articles is correct), add everything under contents/static as static contents to be uploaded to the server, disabled showing of categories of articles and creating feeds for them, disabled saving of articles by tags and set pages (which are simple web pages unlike articles which are blog entries) to show on the menu. Next, themes.
Themes
------
Pelican comes with a default theme (the same as used by Pelican's website) but I wanted something more understated so I took at look at `https://github.com/getpelican/pelican-themes `_ and chose pelican-mockingbird. Either clone it or add it as a git submodule (depends on if you're using Git to version control your blog or not)
.. code:: shell
git clone https://github.com/wrl/pelican-mockingbird.git #If you're not using Git.
git submodule add https://github.com/wrl/pelican-mockingbird.git #If you're using Git.
and set the theme to that by adding the following to pelicanconf.py: ::
THEME = "./pelican-mockingbird"
I've also edited :code:`base.html` and :code:`article.html` inside of :code:`pelican-mockingbird/templates` to suite my liking. Next, let us add a new entry.
Adding an entry
---------------
Create a ReStructuredText file inside of contents. The filename is for personal use and not critical. The heading is the article name and you can add the following for Pelican to use: ::
:date: 2014-04-19
:slug: this-will-the-filename
:author:
:summary:
After we added the content we want to upload it to our web server (I use fabric)
.. code:: shell
fab publish
If you don't have keys set for the server it will ask you for your password to the server.
Last thing, you can create pages, create a pages directory inside contents and save the files there. Their format is the same as articles but they'll have a somewhat different template applied and they will be shown in the menu. A good example would be an 'About Me' page.
That's it, you now have Pelican installed, configured and published to your web site. If you want to see a real life example, clone `my blog `_.
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/pocketchip-xmodmap.rst 0000664 0000000 0000000 00000001164 13744741101 0025406 0 ustar 00root root 0000000 0000000 How to fix the PocketCHIP for Vim
#################################
:date: 2017-04-22
:summary: How to fix the PocketCHIP for Vim
I ordered a PocketCHIP to have a cheap, portable Linux computer with a
physical keyboard, with the added benefit of it running Debian out of the box. I
quickly discovered that in Vim the dash or minus key is not what you'd expect. A
quick search turned up that the key is mapped to the numpad minus via
:code:`xmodmap`. To change the mapping to what is for me a better setting run
:code:`sed -i 's/KP_Subtract/minus/g' ~/.Xmodmap` and to apply the setting
afterward run :code:`xmodmap ~/.Xmodmap`.
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/pre-commit.rst 0000664 0000000 0000000 00000002540 13744741101 0023665 0 ustar 00root root 0000000 0000000 Pre-commit hooks
################
:date: 2016-03-12
:summary: New pre-commit hooks I wrote
`Pre-commit `_ is a nice, simple tool to add Git
hooks to your project. The primary goal is running fast checks on commits
(before committing them), mainly linters and syntax checkers. Today I've 2 of my
own, for Ansible playbooks and shell scripts. The Ansible playbooks hook is
located at https://www.shore.co.il/git/ansible-pre-commit/ and the shell scripts
hook is at https://www.shore.co.il/git/shell-pre-commit/. Both have a short
README which describes installation and usage.
My view on testing
------------------
I find that Pre-commit suites my view on proportionate testing. The smaller the
change, the faster the test (and as a result, more trivial). Personally, I
prefer to structure my work as small commits that are easier to revert, these
deserve fast (and more trivial) tests which Pre-commit provides. The bigger the
change, the more rigorous (and thus longer) the test. In my opinion this helps
in creating a good workflow which quickly finds small errors while developing
and reduces the number of times one must run the full test suite because he/she
had a typo that failed the test. This is why I prefer to separate the test
suite so that I have the ability to run the simpler and faster tests locally
and get rid of simple errors quickly.
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/resolver.rst 0000664 0000000 0000000 00000002755 13744741101 0023462 0 ustar 00root root 0000000 0000000 Testing DNS with a clean cache
##############################
:date: 2016-11-01
:summary: Testing DNS with a clean cache
Every so often I make changes to a DNS record, test it, find out it's wrong, fix
it and still get the old response because of caching somewhere along the line.
After it happened to me and a colleague during a launch of a new version of a
website, I decided to address the issue. I wanted a way to test DNS quickly and
easily (preferably locally on command line), for it to be lightweight, doesn't
require changes to my existing setup and doesn't require learning new tools. I
decided to create a Docker image that has its own DNS resolver and each new
container from that image has a clean cache and doesn't depend on other DNS
servers or is affected from their caching.
Usage
-----
To create a new container:
.. code:: shell
docker run -it adarnimrod/resolver
Inside the container you have access to :code:`nslookup`, :code:`dig` and
:code:`mail` for testing purposes. If you need to test new changes,
:code:`exit` the container and create a new one with no cache.
If you want to run just a single command (like getting the MX record for
:code:`shore.co.il`):
.. code:: shell
docker run adarnimrod/resolver dig +short shore.co.il mx
How does it work
----------------
On launch, the container runs and uses its own DNS resolver (in this case NSD).
This way the OS caching or upstream caching don't interfere with querying and
every new container starts with an empty cache.
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/shell_binary_bundle.rst 0000664 0000000 0000000 00000007347 13744741101 0025627 0 ustar 00root root 0000000 0000000 Bundling a binary file into a shell script
##########################################
:date: 2017-12-06
:summary: Bundling a binary file into a shell script
When creating an auto-scaling group in EC2 I often try to package the deployment
script into the user data. Installing some packaged software is easy to do but
bundling configuration files that are needed is less straightforward.
If the files are not confidential in any way, I either clone a Git repository
or download a tarball from our static assets domain. But this leads to a
dependency on external services and a slightly more complex deployment
procedure. A few days ago I was faced with the same options again but it didn't
sit right with me to do all this for a couple of files that are a few K's in
size totally. I remembered that some software have installation scripts that
bundle the binary blob inside the script.
First version
-------------
I searched and found an article in the `Linux Journal
`_
that seemed to show what I wanted to (and seems to be copied everywhere). You
could download a single file that was a shell script with the binary blob
inside. Your usage will be close to this
.. code:: shell
wget http://hostname.tld/bundle
sh bundle
or this
.. code:: shell
wget http://hostname.tld/bundle
chmod +x bundle
./bundle
Which is fine. However the code was a bit longer than it should have been and
I felt it could be done better. A little more research and I found an answer in
`Stack Overflow `_ that mentioned
:code:`uuencode` and :code:`uudecode`. Reading the man page I saw it was closer
to what I wanted. The code I wrote is available on my `cgit instance
`_.
The implementation works as follows. The bundle has the script at the start of
the file with the encoded binary at the end. The shell executes the script part
(which ends with exit as to not continue any further, causing errors) and
:code:`uudecode` only starts processing after it sees the relevant header. The
script feeds itself to :code:`uudecode` (:code:`uudecode "$0"`) which decodes
the binary and outputs it to disk which the script can then use. The code has
both the build instruction in the :code:`Makefile` and usage example in the
:code:`bats` tests.
Second version
--------------
However something kept nagging me. I wanted a simple invocation method like so:
.. code:: shell
curl http://hostname.tld/bundle | sh
And in the case of the user data in EC2, I could simply use the bundle.
Otherwise I would need to host it somewhere and in the user data I would
download and run the bundle. Which means that if the bundle was unavailable the
instance would fail to provision.
Everything I found assumed that the file was present in the file system for
:code:`uudecode` to decode. If it was piped there was no file that
:code:`uudecode` could then decode. I kept mulling over it and came up with
a short, clean solution to this problem, which is available `here
`_, again
with build instruction and test examples.
This time I used AWK to replace a single line in the script with the file,
encoded using :code:`uuencode` but this time in base64 (to keep the script valid
without any characters with special meanings). That is piped to :code:`uudecode`
which decodes and saves it to disk. The script can then continue with the
binary blob present.
This method is less space efficient and the build procedure is less obvious. But
the ability to use resulting script as the user data (or piping the output from
:code:`curl` to :code:`sh`) is worth it in my opinion.
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/sourced_or_not.rst 0000664 0000000 0000000 00000002510 13744741101 0024632 0 ustar 00root root 0000000 0000000 Finding is a script sourced or not?
###################################
:date: 2016-02-29
:summary: How to find if a shell script is sourced or not.
I've recently written a shell script that contained several functions and I
wanted to support 2 usage methods. The first is quite regular, marking it as an
executable and running it. The second is to source the script and gain the
functions declared. The problem is not actually performing any tasks (or
outputting anything) if the script is being sourced. It took a bit of fiddling
but I found a short one-liner to add at the top of the script that solves this
in a POSIX-compliant way (at least on my test machines, Debian with Bash and Dash
and KSH on OpenBSD). Here is an example usage:
.. code:: shell
#!/bin/sh -e
# Check if the script is being sourced or not.
[ "$_" != "$0" ] && expr "$-" : ".*i.*" > /dev/null && sourced=1
if [ "$sourced" ]
then
echo Sourced
else
echo Run
fi
The solution is using 2 heuristics. If the last argument (:code:`$_`) is
different from the command name (must be first command run, otherwise the last
argument will be overwritten). The second is if the option flags (:code:`$-`)
contain :code:`i` for interactive. This works when both marking the script as
executable and passing the name as a parameter to the shell.
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/ssh_security.rst 0000664 0000000 0000000 00000007755 13744741101 0024352 0 ustar 00root root 0000000 0000000 SSH security
############
:date: 2016-07-05
:summary: My best practices regarding SSH security
Over the years I'd heard many people share their best practices regarding
securing SSH access and have been asked by friends and colleagues how to secure
their servers. So here are my opinions and practices regarding SSH security. The
main point I try to get across is balance. Balance between security and
functionality.
Practices I avoid
-----------------
First, changing the listening port. The upside is that a high random port is
scanned less often and the various script kiddies sometimes fails to notice it,
thus reducing the noise in the logs. This however is no real security measure
as any capable attacker will quickly spot the daemon listening on a different port
and all benefits will be lost. The downside is that by not using the default
port you need to configure all clients accordingly. So, no substantial wins
and minor loss. I pass on this idea.
The second most common is allowing access only from the office IP or a few
select IP addresses. The security benefit is high but the risk is also high. I
view SSH access to servers as critical and limiting access puts you in risk of
locking yourself out in case of trouble/ emergency. Not having management access
to your servers is more dangerous to your business than allowing whomever to
try (not succeed) to access your servers. Therefore I prefer to limit the access
in different ways.
Practices I employ
------------------
Most importantly, allow only key-based authentication. This works without
relying on 3rd party services and is extremely secure when done properly. The
most important issue keeping your private key private. Personally I keep my home
directory on a separate partition that is encrypted with LUKS so the keys are
encrypted when in rest. If you don't have encrypted storage for your keys,
password encrypt the keys (consult the ssh-keygen manual for instructions).
Another measure of security is limiting the number of authentication attempts
any single IP can perform in a given time. This is achieved by 2 actions. By
ensuring that :code:`MaxAuthTries` in your sshd config is not set too high (the
default is 6 which is damn reasonable) and by limiting the number of TCP
connections to port 22 any IP can initiate. With UFW on Linux this is done by
running :code:`ufw limit ssh` and for OpenBSD or FreeBSD I'd refer you to Peter
Hansteen's `great PF tutorial
`_.
These 2 steps will create a barrier to entry that no brute-force attack will be
able to overcome in anyone's lifetime. This is what I do on all of my servers
and it has served me well. However, although the commonly used OpenSSH is
extensively used, researched and tested, bugs still happen. Keeping current
with updates is vital.
Also, some other good practices are disabling root login, SSH protocol version 1
is deprecated, insecure and must be turned off (is off by default, but I felt
it's worth mentioning). To further simplify things, here is a short Ansible
playbook that covers the actions mentioned above for Debian based systems.
.. code:: yaml
---
- hosts: all
handlers:
- name: Restart SSH
service:
name: ssh
state: restarted
tasks:
- name: APT install and update
with_items:
- openssh-server
- ufw
apt:
name: '{{ item }}'
state: latest
update_cache: yes
cache_valid_time: 3600
- name: Configure SSHd
with_dict:
MaxAuthTries: 10
PasswordAuthentication: no
PermitRootLogin: no
Protocol: 2
lineinfile:
dest: /etc/ssh/sshd_config
regexp: '{{ item.key }}'
line: '{{ item.key }} {{ item.value }}'
state: present
notify:
- Restart SSH
- name: Enable UFW
ufw:
state: enabled
- name: Rate limit SSH
ufw:
rule: limit
port: ssh
protocol: tcp
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/ssl.rst 0000664 0000000 0000000 00000007532 13744741101 0022420 0 ustar 00root root 0000000 0000000 SSL/TLS ciphers
===============
:date: 2014-07-12
:summary: Which ciphers to enable and in which order.
The problem at hand
-------------------
You have a website and you want to encrypt the traffic going in and out of your webserver. Since you heard about the attacks currently known against SSL and TLS, you want to configure your server to not be vulnerable to any. In a perfect world (or if you control your clients) all you have to do is allow TLS 1.2 and AES-GCM with elliptic-curve Diffie-Hellman key exchange only (AESGCM+ECDH when using openssl) and you're set. This combination is secure, fast, offers perfect forward secrecy and at the time of writing there are no known attacks that make it crackable in a reasonable time. So what's the problem? With a public website you don't control the web browser the visitor uses. If he or she is using IE on Windows XP or Android 2.x the browser doesn't support TLS 1.2 or AES-GCM and the visitor can't access the website. How do you keep your website secure yet reasonably accessible?
Known attacks on SSL and TLS
----------------------------
First, SSL 2.0 is insecure (it's even disabled by default in IE7) so we'll not be using it. Version roll back attacks allow a man in the middle to change the response from the client to force a lower grade (read the lowest grade possible) cipher suite.
The BEAST attack exploits a weakness in CBC ciphers in TLS 1.0. But fixes all major browsers have been released for quite some time, so we're going to assume that the client is secure and CBC ciphers are safe to use (reasonable assumption, but still an assumption).
CRIME and BREACH exploit a weakness in compression and RC4 is considered to be weak although not broken like DES or MD5.
IE in Windows XP
----------------
All versions of IE that are available on Windows XP offer RC4 and 3DES as the best ciphers available. Unfortunately Chrome uses the Windows crypto library so it has the same limitation. For a user this means that if you're on Windows XP you should be using an up-to-date version of Firefox to have the best experience until you can move from Windows XP (or Windows in general). For the website manager it leaves you with 2 options, either add support for either 3DES or RC4 ciphers with SHA1 hashes (for openssl, add RC4-SHA or 3DES-SHA at the end of the cipher list) or ask users to use Firefox if they're still on XP. I chose the latter rather than the former, but I have that luxury.
What are we left with?
----------------------
Since modern browsers that support SSL 3.0 support TLS 1.0, we'll be using TLS 1.0 or newer. Any AES cipher (AES-GCM preferred) with ECDH key exchange (preferred) or DH key exchange and SHA2 (preferred) or SHA1 hashes and disable compression. On my server (OpenBSD firewall/ load-balancer/ SSL terminator and reverse-proxy) with the included OpenSSL and Nginx the configuration is as follows ::
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers !kRSA:!3DES:!RC4:!DES:!MD5:!aNULL:!NULL:AESGCM+ECDH:AES256+ECDH:AES128:+SHA1;
ssl_prefer_server_ciphers on;
Take note that I first disable what I don't want, then allow what I do want in the order I prefer. I've also disabled DH key exchange with AES-GCM since all browsers that support AES-GCM support ECDH so I've opted for that (the reasoning being that ECDH is faster than DH so it's preferable).
Final words
-----------
This is not enough to call your site secure. I haven't mentioned secure cookies, HSTS, input sanitization, cross-site scripting, OCSP, certificate strength, implementation vulnerabilities (such as OpenSSL's heartbleed) or any of the other security considerations. For testing purposes I used `sslscan `_ and `Calomel's SSL validation add-on for Firefox `_. You can also use `SSLLabs' SSL test `_.
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/static/ 0000775 0000000 0000000 00000000000 13744741101 0022345 5 ustar 00root root 0000000 0000000 blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/static/nimrod.asc 0000664 0000000 0000000 00000004624 13744741101 0024333 0 ustar 00root root 0000000 0000000 -----BEGIN PGP PUBLIC KEY BLOCK-----
mQGNBF0jNJYBDADFGso8mCodmqsHyY4ubAQZ8D/g0i5hwZY6Jv9s9MhfnoMDC1F/
4c7S9IXodjmwA6ydoz5Zrjvmfn7THwIWh4zY88Z/ukUSZ8D1C3FFNw8BlauH5kB8
Lh86Y3SuHd8VyD8QyElOIfPp5OZyuWSJGH6YYuQZFsEkfbEwuowMd0LdMxbBVYal
oE0DNAtkSyexH+mVmnE8flPTWC45PXvmocOZWXU0rp0LIagIMKiw5a9yFyGUp1yk
+gUtezm2tftVqqn230DImtOnl+ra0pncHAZ5Bn/sPyXiFxQlEsrvix+xRsQQrl7z
BtZ8lD3qixEOc0+To0l0naKBlzLJ9knbJKcoFEgKiAdBQmAezOj/ec7KTrrZslVx
heu4qbydEkDuEWBEm220mV8sV3ts7LkTCAycm59p+RdjA4qHp+si8h61Zdi+Gr38
DXgO0mxznVE5LrDWAKVdPUfJ4r0nw4EyI1lCqRmk5ThxRIlQAvPz/ImLc6OSDE5d
gPfxSqgfuipfHcUAEQEAAbQgTmltcm9kIEFkYXIgPG5pbXJvZEBzaG9yZS5jby5p
bD6JAdQEEwEIAD4WIQTpMNVqvLxO4qiUfU+V5M0i0PTY1QUCXSM0lgIbAwUJA8Jn
AAULCQgHAwUVCgkICwUWAgMBAAIeAQIXgAAKCRCV5M0i0PTY1UcgC/4tmV2Ws8Bd
ZldV+jDP/usAeldx4BDlHpQ4GpVqOyjOj0ThIOF0qbV8T1fotQjLpKew+n88dl/U
uSp1zgCf8dwPisifrO8SazgIrTB/Y1IWcuepSfUHrBGo8BJ4IMahnljW1nDA2ORb
w09ksp+yEKzIwCK5rpdJz9H9L+/WtQE8WrQnTHA0fmx+AD7Pn1dzwsGpsQuMUFtu
ZWm0xe85U1lUUap9Z8r6hgcSpLtCin8oiDoQCp9PtFxJduMfdAX6HBBc4TcX105Y
HMVsEuk4mV2jESpBZqiV0RteExp3PwZzL9GzmoIm0FyTEUXHNfVbLXJWWgu4mboo
arurlX+CTQvUwIMxQQJWIyulK/e0U98lSn2YkhoovI4deujJnM8+h/90IJpT3psU
cFPMVSLiGYvpFy0ls2hcesvf8qPGT8YcrfCjfcsUejDlhDOeWMs/57SZScPHxfr6
lFDgKQVu7x28Z6nw9+M1TnGbZ17WARfefiWwFxbjbS/HuabFLgThdSS5AY0EXSM0
lgEMAMQOb/ETVF0hFxteAVOzWAQQ+U42sLOtafdbCEeh7tYTVqnrGc7LodiZGcNT
J4iNRreDVMAws6v/53MPHmKy/gpcmriEOdYnWX8JEiQo4EqlJtn3BQQ40hwIPojA
lWJBXvrkM1CtRPNgvt9XkuoAjFG+b48xkzzVWfCh5hY7TXQLpXDA50nW+PGbfAKm
tao+Dg6dVGhXH7oBMweNLvsFMd1CA3MzoN44BIKRUo+/Kw1qdC0Pen/KcsBY+/t1
Kv7tTTcbThfftyLUMzrS4oghuVL+kwiwmBMLpm8zOPDKxjM0CB8RhnbSw/LsRBPC
bqbT8NmEh2npYug2EVCNeKxcI0fiFpomSZBd1bATdeYpbftGEOp7XZj6RFHLtjA3
Ij/N21NOrWch+K6pyFA10CAS/Y/oM9TxEUVeiydJuvyyFHY68GvuSBwUbao+JsKV
tnqMqALGQOOcvsQTTCnKvJi5mo8eywKk69dwdHZ+Mmaedp5kKSVC5JLihugj697J
+E795wARAQABiQG8BBgBCAAmFiEE6TDVary8TuKolH1PleTNItD02NUFAl0jNJYC
GwwFCQPCZwAACgkQleTNItD02NVMHAwAiFF07LesyL/0BSuz0XhQvxMzOgGOvvUQ
c405GIk0fSUhBd9/iVZH7HDbH5o+ackAk4iHUc3g982Wesylur9oPcIf7p2FbZE/
UnFj/uDuKkGkf//jCDg04ZYi0sbFkIR7PvIcjjYb32agiWSvBWWA7npqSK/JWGWo
z6CMM1O5xInjQKRn/h6BkUStZPLpHPcAfVSV4DhTky0m8hEp//HUQOW24q7EQ3OW
Y6InZcwpBaQy86iYrwD/ZwUlEZcMN6hRvKxr7nFc20v0Xiz1R6ms+XDZPkzAg83k
9SoEdXD9NTI5WorUJM3kSIiaMOgfjSa253okVFobJkqpBcwhDivdVnvpRVoFHTqF
d46drUnr1OVctt/WkRglVVkGOH6MULK+0lNc/eBqLj6OWhOV/xNptM0ZNPjhXJMS
pdDhpqo+LMMOzC/zapgbfiW8ffdHW7C1Vx4bIi6Cpqxsz7vMdzzk67aJTMZ2zNBM
C2K3k+O4t9UZv793JnEGCCFmKQOZJ/DR
=ygn/
-----END PGP PUBLIC KEY BLOCK-----
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/static/policy.json 0000664 0000000 0000000 00000001304 13744741101 0024535 0 ustar 00root root 0000000 0000000 {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"iam:GetAccountPasswordPolicy",
"iam:ListAccount*",
"iam:GetAccountSummary",
"iam:GetAccountPasswordPolicy",
"iam:ListUsers"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"iam:ChangePassword",
"iam:*LoginProfile",
"iam:*AccessKey*",
"iam:*SSHPublicKey*"
],
"Resource": "arn:aws:iam:::user/${aws:username}"
}
]
}
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/static/runas 0000775 0000000 0000000 00000000546 13744741101 0023430 0 ustar 00root root 0000000 0000000 #!/bin/sh
set -eu
# Not root: there are no privileges to drop, so run the command as-is.
if [ "$(id -u)" != "0" ]; then
    echo "Not running as root, continuing as the current user."
    eval exec "$@"
fi
if ! command -v stat > /dev/null; then
    echo "Can't find stat, exiting."
    exit 1
fi
if ! command -v gosu > /dev/null; then
    echo "Can't find gosu, exiting."
    exit 1
fi
# Run the command as the owner of the current directory, so files it
# creates match the ownership of the (presumably bind-mounted) directory.
owner_uid="$(stat . -c '%u')"
owner_gid="$(stat . -c '%g')"
eval exec gosu "$owner_uid:$owner_gid" "$@"
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/vbox_extenstions.rst 0000664 0000000 0000000 00000003311 13744741101 0025227 0 ustar 00root root 0000000 0000000 VirtualBox extensions
#####################
:date: 2017-01-17
:summary: Installing the VirtualBox extension pack
I happened to run into a Vagrant image today that required the USB2 controller
extension to VirtualBox. The procedure to install the extension pack was not
immediately clear to me.
Installing through the VirtualBox GUI
-------------------------------------
When trying to go through the VirtualBox GUI, the
application would close when I clicked on 'add new package' in the preferences
window. Guessing it had something to do with root privileges I tried to run the
GUI with :code:`sudo` but that failed because I'm using Wayland and it could
not find display :0. Using Gnome3, :code:`gksudo` is no longer available and
the replacement is :code:`pkexec` which uses PolicyKit. Running :code:`pkexec
VirtualBox` opened the nice Gnome3 authentication prompt but resulted in the
same error. Giving up on starting the VirtualBox GUI as root, I moved my
efforts to the :code:`VBoxManage` CLI.
Installing with the VBoxManage CLI
----------------------------------
Here are the steps I took to successfully install the extension pack:
#. Find the version of VirtualBox you have installed by running
:code:`VBoxManage --version`.
#. Download the matching version of the extension pack from
https://www.virtualbox.org/wiki/Downloads if you're using the latest version
or from https://www.virtualbox.org/wiki/Download_Old_Builds if you're using
an older version.
#. Assuming you're using version 5.1.10, install the extension pack by running
:code:`VBoxManage extpack install
Oracle_VM_VirtualBox_Extension_Pack-5.1.10-112026.vbox-extpack`. For me,
this opened the same nice Gnome3 authentication prompt.
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/content/why-no-blogging.rst 0000664 0000000 0000000 00000002626 13744741101 0024625 0 ustar 00root root 0000000 0000000 Why I don't write on my blog often?
###################################
:date: 2015-11-10
:summary: Why I don't write on my blog often
I often criticize myself for not blogging more often. The process goes like this:
I'm doing something mildly interesting and I say to myself 'This is mildly
interesting, maybe someone else will find this mildly interesting.'. But
9 out of 10 times, whatever I'm doing has some code (when I say code I usually
mean an Ansible playbook, a shell script or something similar) accompanying it.
Instead of a lengthy blog post, I publish a git repo. The repo has a :code:`README`
file, the code is documented, there's a :code:`Makefile` or :code:`fabfile`,
you can clone and fork the repo. It's almost always better than a blog post.
However now I have many repositories and just a few blog posts. What I'm going to do
from now on is I'll publish the git repo, but add a short post announcing the
repo.
ssl-ca
------
I'm announcing ssl-ca, a tool to generate a certificate authority, keys and
signed certificates. The main use case is an internal network (like a
development or staging environment, but not just) where you control all nodes.
For that goal, it's as close to a real CA as needed and somewhat secure. There's
no OCSP or CRL, the cert serials are random, but the default hash, bit length and
algorithms are modern and secure. You can get it at: https://www.shore.co.il/cgit/ssl-ca/about/.
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/fabfile.py 0000664 0000000 0000000 00000005160 13744741101 0021350 0 ustar 00root root 0000000 0000000 from fabric.api import lcd, env, local, hosts, warn_only
import fabric.contrib.project as project
import multiprocessing
import os
# Honor the user's ~/.ssh/config for host aliases and connection options.
env.use_ssh_config = True
# Local path configuration (can be absolute or relative to fabfile)
env.deploy_path = 'output'
DEPLOY_PATH = env.deploy_path  # module-level alias used by the tasks below
# Remote server configuration
production = 'host01.shore.co.il'  # rsync target host for publish()
dest_path = '/var/www/www.shore.co.il/blog/'  # remote web root for the blog
# Invoking user's UID, passed to docker runs so generated files are not root-owned.
UID = os.getuid()
def clean():
    """Delete the generated site, caches and test-result directories."""
    targets = f'__pycache__/ {DEPLOY_PATH}/* sitespeed-result/ lighthouse-result/'
    local('rm -rf ' + targets)
def build():
    """Generate the site locally using the development settings."""
    command = 'pelican -s pelicanconf.py'
    local(command)
def regenerate():
    """Watch the content tree and rebuild the site on every change."""
    command = 'pelican -D -r -s pelicanconf.py'
    local(command)
def serve():
    """Serve the generated site at http://localhost:8080/."""
    server_cmd = 'python -m http.server 8080'
    # Serve from inside the output directory so URLs map to files directly.
    with lcd(DEPLOY_PATH):
        local(server_cmd)
def dev():
    """Auto-regenerate files and serve at http://localhost:8080/

    Starts the HTTP server in a background daemon process while pelican
    watches for changes in the foreground. The server is always torn
    down, even if regenerate() raises (e.g. on KeyboardInterrupt) —
    previously an exception would skip terminate()/join().
    """
    server_process = multiprocessing.Process(target=serve, daemon=True)
    server_process.start()
    try:
        regenerate()
    finally:
        server_process.terminate()
        server_process.join(timeout=3)
def preview():
    """Build the site with the production settings (publishconf.py)."""
    command = 'pelican -s publishconf.py'
    local(command)
@hosts(production)
def publish():
    """Publish to production via rsync"""
    # Build the production version first, then mirror it to the server.
    preview()
    project.rsync_project(
        local_dir=DEPLOY_PATH.rstrip('/') + '/',
        remote_dir=dest_path,
        delete=True,  # remove remote files that no longer exist locally
        exclude=".DS_Store",
        extra_opts='-c',  # compare by checksum, not mtime/size
    )
def sitespeed():
    """Run sitespeed test against the local dev server.

    Builds the site, exposes the host to containers via gliderlabs/hostlocal
    (best-effort, hence warn_only), serves the output in a background
    process and runs sitespeed.io against it. The server process is now
    always cleaned up, even if the docker run fails.
    """
    build()
    with warn_only():
        # Best-effort: make the host reachable from containers at
        # 169.254.255.254; a failure here should not abort the task.
        local('docker run --rm --privileged --net=host gliderlabs/hostlocal')
    server_process = multiprocessing.Process(target=serve, daemon=True)
    server_process.start()
    try:
        # Run as the invoking user so the result files are not root-owned.
        local(f'docker run --rm --shm-size=1g -u {UID} -v "$PWD:/sitespeed.io" sitespeedio/sitespeed.io http://169.254.255.254:8080/')
    finally:
        server_process.terminate()
        server_process.join(timeout=3)
def lighthouse():
    """Run Chrome's Lighthouse report against the local dev server.

    Mirrors sitespeed(): builds the site, exposes the host to containers
    (best-effort), serves the output in a background process and runs the
    Lighthouse container against it, always cleaning up the server.
    """
    build()
    with warn_only():
        # Best-effort: make the host reachable from containers at
        # 169.254.255.254; a failure here should not abort the task.
        local('docker run --rm --privileged --net=host gliderlabs/hostlocal')
    server_process = multiprocessing.Process(target=serve, daemon=True)
    server_process.start()
    try:
        local('mkdir -p lighthouse-result')
        # BUG FIX: this was a plain string, so "{UID}" was passed literally
        # instead of being interpolated, and a conflicting hard-coded
        # --user=1000 masked the problem. Use an f-string (as sitespeed()
        # does) and drop the duplicate flag so the container runs as the
        # invoking user and the reports are not owned by a fixed UID.
        local(f'docker run --rm -u {UID} -v "$PWD/lighthouse-result:/home/chrome/reports" --cap-add=SYS_ADMIN justinribeiro/lighthouse lighthouse --chrome-flags="--headless --no-sandbox --disable-gpu" http://169.254.255.254:8080/')
    finally:
        server_process.terminate()
        server_process.join(timeout=3)
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/pelican-mockingbird/ 0000775 0000000 0000000 00000000000 13744741101 0023305 5 ustar 00root root 0000000 0000000 blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/pelicanconf.py 0000664 0000000 0000000 00000001743 13744741101 0022244 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = 'Nimrod Adar'
SITENAME = 'My notes and ramblings'
# Local development URL; publishconf.py overrides this for production.
SITEURL = 'http://localhost:8080'
PATH = 'content'
TIMEZONE = 'Asia/Jerusalem'
DEFAULT_LANG = 'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Blogroll
LINKS = ()
DEFAULT_PAGINATION = 3
# Uncomment following line if you want document-relative URLs when developing
RELATIVE_URLS = True
DIRECT_TEMPLATES = ('index', 'archives')
DISPLAY_CATEGORIES_ON_MENU = False
DISPLAY_PAGES_ON_MENU = True
MENUITEMS = (('Code', 'https://www.shore.co.il/git/'),)
THEME = "./pelican-mockingbird"
# Empty save-as patterns disable generation of per-tag pages entirely.
TAGS_SAVE_AS = ''
TAG_SAVE_AS = ''
STATIC_PATHS = ['static']
ARTICLE_URL = '{slug}/'
DELETE_OUTPUT_DIRECTORY = True
SLUGIFY_SOURCE = 'basename'
ARTICLE_SAVE_AS = '{slug}/index.html'
# FIX: was ('en_US.utf8') — the redundant parentheses misleadingly suggested
# a tuple but the value is a plain string; value unchanged.
LOCALE = 'en_US.utf8'
blog-b99513ebadc5f39c77d109597804b76b79e5a2c0/publishconf.py 0000664 0000000 0000000 00000000543 13744741101 0022274 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
# Make pelicanconf importable no matter which directory pelican is run from.
sys.path.append(os.curdir)
# Pull in every development setting, then override the production ones below.
from pelicanconf import *
# Canonical production URL; absolute URLs are needed for feeds and sharing.
SITEURL = 'https://www.shore.co.il/blog'
RELATIVE_URLS = False
# Minify the generated HTML when publishing (pelican-minification plugin).
PLUGINS = ('minification',)