Compare commits

...

233 Commits

Author SHA1 Message Date
a 1628f83514 Merge pull request '2.96' (#2) from github-mirrors/seaweedfs-operator:master into master
Reviewed-on: https://git.tuxpa.in/a/seaweedfs-operator/pulls/2
2022-03-30 03:24:54 +00:00
chrislu fbd37dd3eb 2.96 2022-03-27 18:58:34 -07:00
elee c6eaadf4af added timeout for do 2022-03-20 17:55:56 -05:00
elee 4d68bf590f more small changes, modify gitignore 2022-03-20 02:22:31 -05:00
elee 9c3d010b99 change certificate target 2022-03-20 02:11:05 -05:00
elee 675cbc0d03 remoting 2022-03-20 02:06:33 -05:00
a fb93d78b80 Merge pull request 'master' (#1) from github-mirrors/seaweedfs-operator:master into master
Reviewed-on: https://git.tuxpa.in/a/seaweedfs-operator/pulls/1
2022-03-20 07:06:19 +00:00
chrislu 7240de7226 2.93 2022-03-06 19:54:46 -08:00
chrislu 28cb170c5f 2.92 2022-02-28 15:55:13 -08:00
chrislu 63a2db701f 2.91 2022-02-27 12:50:25 -08:00
elee 774b444461 emable webhooks by default 2022-02-27 06:58:14 -06:00
chrislu ddd5521de2 avoid logs in tmp folder 2022-02-27 02:43:43 -08:00
chrislu dc13e7ca90 2.90 2022-02-20 23:00:30 -08:00
chrislu 0ccce292e0 2.89 2022-02-14 03:36:37 -08:00
chrislu 20b6cd7411 Merge branch 'master' of https://github.com/seaweedfs/seaweedfs-operator 2022-01-30 22:10:54 -08:00
chrislu 2b2737e627 2.88 2022-01-30 22:10:51 -08:00
Chris Lu 6e550b8d87
Merge pull request #58 from Kryptonite-RU/add-defaultreplication-cmdline 2022-01-26 10:10:12 -08:00
Grigoriy Narizhnyy aed8db644d Add defaultReplication command line parameter from CR value 2022-01-26 17:15:48 +03:00
chrislu 35eb4debb5 2.86 2022-01-18 09:47:40 -08:00
chrislu c329457293 2.84 2022-01-02 17:25:06 -08:00
chrislu 242fe939ce fix test 2021-12-25 12:44:41 -08:00
Chris Lu c23330d8af
Merge pull request #56 from robinbraemer/patch-1
update master StatefulSets replicas
2021-12-25 12:18:43 -08:00
Robin Brämer ec732cecaf
update master StatefulSets replicas 2021-12-25 20:34:28 +01:00
chrislu 061d8c0784 2.83 2021-12-25 01:27:48 -08:00
chrislu 0fbed44878 2.81 2021-12-05 18:21:34 -08:00
Chris Lu 654403bd17 2.80 2021-11-29 01:13:48 -08:00
Chris Lu cbb981aad9 2.77 2021-11-07 21:28:26 -08:00
Chris Lu d53367b5d3 fix compilation 2021-11-03 02:21:51 -07:00
Chris Lu f67be3d76c update module 2021-11-03 02:19:37 -07:00
Chris Lu 91b6927e12 go mod download cloud.google.com/go/storage 2021-11-03 02:03:53 -07:00
Chris Lu 624ef883e5 update go mod 2021-11-03 01:59:34 -07:00
Chris Lu 6c1647a778 sync with main repo 2021-11-03 01:44:06 -07:00
Chris Lu 3e43cc27b7 update to use latest grpc API 2021-11-03 01:38:38 -07:00
Chris Lu 179810df8e 2.76 2021-10-31 20:32:38 -07:00
Chris Lu 1384065a91 added apache 2.0 license
fix https://github.com/seaweedfs/seaweedfs-operator/issues/51
2021-10-20 11:33:21 -07:00
Chris Lu 1a5f79e36b 2.74 2021-10-18 21:17:56 -07:00
Chris Lu 39610ab6f6 2.72 2021-10-17 18:42:02 -07:00
Chris Lu 7b5c2aa103 2.71 2021-10-13 21:42:18 -07:00
Chris Lu aa5393d4c4 2.70 2021-09-26 17:38:25 -07:00
Chris Lu 23ec53e93d 2.69 2021-09-26 17:17:12 -07:00
Chris Lu 0c5179c19e
Merge pull request #50 from IxDay/preserve_unknown 2021-09-15 09:11:03 -07:00
Maxime Vidori 85c6271843 Add preserveUnknownFields to false 2021-09-15 12:49:33 +02:00
Chris Lu fddd24dfb6 2.68 2021-09-13 23:09:30 -07:00
Chris Lu de4505f59e 2.66 2021-09-06 10:53:02 -07:00
Chris Lu 2a72636987
Merge pull request #47 from arthurlogilab/patch-1
[manager] Bump up the limits and requests
2021-07-20 01:10:53 -07:00
Arthur Lutz cb1fb247c7
[manager] Bump up the limits and requests
Fixes #46
2021-07-20 10:08:54 +02:00
Chris Lu d1aeaa434c 2.59 2021-07-15 15:59:49 -07:00
Chris Lu 07abcff15a sync version to 2.58 2021-07-15 15:08:11 -07:00
Chris Lu 49c2775a5b skip test which is failing on etcd 2021-07-15 15:08:11 -07:00
Chris Lu 13f8916807
Create README.md 2021-07-15 14:01:24 -07:00
Chris Lu 5f492cba87
Update README.md 2021-07-15 14:01:14 -07:00
Chris Lu 86df57f471
Update README.md 2021-07-15 13:58:16 -07:00
Chris Lu 736b629071 2.56 2021-06-27 23:34:16 -07:00
thiscantbeserious 0909c8a62d
Update README.md 2021-06-25 11:12:19 +02:00
thiscantbeserious f3434d3873
Update README.md 2021-06-25 11:07:58 +02:00
thiscantbeserious 94b66c57d6
Update README.md 2021-06-25 11:06:51 +02:00
thiscantbeserious b891044c66
Update README.md 2021-06-25 11:06:25 +02:00
Chris Lu 682dcbae7c
Merge pull request #43 from seaweedfs/readme_adjustments
Readme adjustments
2021-06-25 01:55:10 -07:00
thiscantbeserious 45599dc10f
Update README.md 2021-06-25 10:17:25 +02:00
Chris Lu 936045790e 2.54 2021-06-19 03:57:23 -07:00
Chris Lu 8c7f7ea97b 2.53 2021-06-13 17:23:04 -07:00
thiscantbeserious 9623cd352e
Update README.md 2021-06-13 12:52:05 +02:00
thiscantbeserious e4e09b32b1
Update README.md 2021-06-10 19:06:00 +02:00
thiscantbeserious 9684d464be
Update README.md 2021-06-10 19:03:55 +02:00
Chris Lu 8d981305be
Update README.md 2021-06-09 13:11:02 -07:00
Chris Lu 297594054e
Update README.md 2021-06-09 13:10:46 -07:00
Chris Lu ca033321ad
Update README.md 2021-06-09 13:10:31 -07:00
Chris Lu feb26f3391
Update README.md 2021-06-09 13:07:56 -07:00
Chris Lu f2248552e5 simple change to test travis build 2021-06-09 12:32:27 -07:00
Chris Lu 9a83337903 Create .travis.yml 2021-06-09 12:22:10 -07:00
Chris Lu ce0ed06111 2.52 2021-06-07 14:06:26 -07:00
Chris Lu 3dd57e8048 2.51 2021-06-06 21:53:44 -07:00
Chris Lu 66f839a186 2.46 2021-05-10 22:36:08 -07:00
Chris Lu 81620f8522 2.44 2021-05-09 23:29:02 -07:00
Chris Lu 98713bbadc
Create main.yml 2021-05-06 11:04:02 -07:00
Chris Lu d9c33ee455 fix s3 ingress 2021-05-02 20:58:36 -07:00
Chris Lu b08d5ce9d1 2.43 2021-05-01 01:08:10 -07:00
Chris Lu 69935bd804 2.42 2021-04-30 11:56:51 -07:00
Chris Lu 1053ae7d62 fix typo
fix https://github.com/seaweedfs/seaweedfs-operator/issues/37
2021-04-25 21:54:38 -07:00
Chris Lu 42358f80d8
Merge pull request #36 from q8s-io/master 2021-04-25 12:00:00 -07:00
70data 2e1ad274a4 remove redis from go.mod 2021-04-26 00:25:46 +08:00
70data d82865f160 fix containerStatus.Ready 2021-04-26 00:14:41 +08:00
Chris Lu 2d063b6ad5
Merge pull request #34 from q8s-io/master
add make delete
2021-04-24 23:42:59 -07:00
70data 089913ba09 update 2021-04-25 13:34:01 +08:00
千夜 65a0950ef9
Merge pull request #1 from seaweedfs/master
2.41
2021-04-25 10:17:41 +08:00
Chris Lu 4233a8b38d 2.41 2021-04-24 16:54:51 -07:00
Chris Lu 6bde7140d5
Merge pull request #33 from kvaster/peers-namespace
Always use dns names with namespace
2021-04-23 17:31:39 -07:00
Viktor Kuzmin 10a3ab6de9 We should use names with namespace to allow direct fuse connections to volume servers for pods from other namespaces 2021-04-23 23:55:56 +03:00
Chris Lu 2284b6c617 2.40 2021-04-18 13:58:09 -07:00
Chris Lu 44d1ebfad1 2.39 2021-04-11 19:47:33 -07:00
Chris Lu 6dc9f6cb10 2.38 2021-04-05 19:42:06 -07:00
Chris Lu 9acefe82e6 2.37 2021-04-04 18:46:06 -07:00
Chris Lu 1cc1fe0407 2.36 2021-03-28 19:09:53 -07:00
Chris Lu 98492a3716 2.35 2021-03-22 00:05:32 -07:00
Chris Lu 51df653787 2.34 2021-03-16 03:02:06 -07:00
Chris Lu 3caaadad4c 2.33 2021-03-16 00:35:49 -07:00
Chris Lu 64a19b4f01 2.32 2021-03-14 21:31:40 -07:00
Chris Lu 00b81cfb2c 2.31 2021-03-09 12:52:25 -08:00
Chris Lu b8059368c2 2.30 2021-03-07 15:01:33 -08:00
Chris Lu 4d7b49a062 2.29 2021-02-28 18:08:43 -08:00
Chris Lu 33545b39d5 Update seaweedfs-operator.iml 2021-02-25 08:25:10 -08:00
Chris Lu a48007e6d9 2.28 2021-02-22 22:59:23 -08:00
Chris Lu 3679d1b118 2.27 2021-02-21 19:30:38 -08:00
Chris Lu 78bc8ad6d8 2.26 2021-02-15 13:39:30 -08:00
Chris Lu bf998078fc 2.25 2021-02-14 22:24:58 -08:00
Chris Lu 96d8d65530 2.24 2021-02-08 00:16:14 -08:00
Chris Lu a0e3c1a7a7 add group extensions for ingress 2021-02-07 03:10:42 -08:00
Chris Lu c081a57cf5 adjust docker image name 2021-02-06 00:28:05 -08:00
Chris Lu a3c2c3203c 2.23 2021-01-31 20:18:42 -08:00
Chris Lu 4599bd7866 2.21 2021-01-18 01:25:53 -08:00
Chris Lu 30058883f6 sync with seaweedfs 2.20 2021-01-09 23:19:40 -08:00
Chris Lu f493b8b0f2 2.20 2021-01-09 23:02:33 -08:00
Chris Lu de183328a5 2.13 2020-11-29 17:00:54 -08:00
Howard Lau 58df469ae5
Fix path 2020-11-28 09:31:41 +08:00
Howard Lau 9b3de719c3
Update README.md 2020-11-28 09:31:24 +08:00
Howard Lau b0f384e3e9
Disable webhooks by default 2020-11-28 09:28:07 +08:00
Howard Lau d86dd68fdd
Update README.md 2020-11-28 09:26:47 +08:00
Chris Lu fa393ffc32 2.12 2020-11-22 17:18:14 -08:00
Chris Lu 014ac322ce 2.11 2020-11-22 17:18:14 -08:00
Howard Lau 8f40aee634
Add instructions for end-user deployment 2020-11-15 11:18:19 +08:00
Chris Lu 62be67dfda Update Makefile 2020-11-14 13:31:54 -08:00
Chris Lu 0e602afa62 2.10 2020-11-10 23:17:34 -08:00
Chris Lu 2573216733 add ingress for volume servers
fix https://github.com/seaweedfs/seaweedfs-operator/issues/16
2020-11-10 00:11:17 -08:00
Chris Lu e853e2870f add service for each volume server
related to https://github.com/seaweedfs/seaweedfs-operator/issues/16
2020-11-09 23:48:22 -08:00
Chris Lu 15fc33c506 add back steps to develop outside of k8s 2020-11-09 23:28:13 -08:00
Chris Lu 98017e9cb0 fix seaweed client command 2020-11-08 23:59:02 -08:00
Chris Lu 831adc091f fix image comparison 2020-11-08 23:58:48 -08:00
Chris Lu ce9dc2bee8 Update README.md 2020-11-08 23:29:57 -08:00
Chris Lu 5a4f61591d adjust for tests 2020-11-08 23:02:09 -08:00
Chris Lu fab237e7ef fix expected master count 2020-11-08 22:34:04 -08:00
Chris Lu 6bb6a1bae6 for the quorum 2020-11-08 18:22:26 -08:00
Chris Lu cb86e4a770 ensure the image version matches expected version
fix https://github.com/seaweedfs/seaweedfs-operator/issues/25
2020-11-08 18:20:33 -08:00
Chris Lu 8342be9b1c proceed only when masters are all ready
fix https://github.com/seaweedfs/seaweedfs-operator/issues/21
2020-11-08 17:41:12 -08:00
Chris Lu 8585daff05 go fmt 2020-11-08 17:40:30 -08:00
Chris Lu 411cb7bc7d fix odd number of masters 2020-11-08 17:40:16 -08:00
Chris Lu b9377c0da1 adjust version 2020-11-08 17:40:00 -08:00
Chris Lu 9ba4b60d94 re-run reconcile every 5 seconds, to check node status changes 2020-11-08 17:00:41 -08:00
Chris Lu eccad4af1e unused functions 2020-11-08 16:46:16 -08:00
Chris Lu 771312ddfb add steps to develop inside k8s 2020-11-08 16:23:17 -08:00
Chris Lu b0f01a5e1a add namespace to master peers list 2020-11-08 16:12:31 -08:00
Chris Lu 3ecd205e8e add a seaweed admin
related to https://github.com/seaweedfs/seaweedfs-operator/issues/23
2020-11-08 00:55:56 -08:00
Chris Lu 291b479395 filer, volume stateful set detect changes 2020-11-04 19:01:19 -08:00
Chris Lu 95dc4a247d master statefulset ensure consistent style
fix https://github.com/seaweedfs/seaweedfs-operator/issues/19
2020-11-04 19:00:34 -08:00
Chris Lu 46806156b6 add filer and s3 ingress 2020-11-04 14:30:15 -08:00
Chris Lu 31c843cae4 set volumeSizeLimitMB to 1024 2020-11-03 23:15:57 -08:00
Chris Lu 9b29d3a377 add sample filer stor setting 2020-11-03 21:29:14 -08:00
Chris Lu 255a6487c7 fix filer configmap 2020-11-03 20:47:45 -08:00
Chris Lu db16a4b2cb minor 2020-11-03 20:26:15 -08:00
Chris Lu ac68889024 detect ENABLE_WEBHOOKS!=false 2020-11-03 20:24:19 -08:00
Chris Lu 561e7fcc3a add filer.toml to /etc/seaweedfs 2020-11-03 20:01:19 -08:00
Chris Lu 7399cc6e30 fix filer replica 2020-11-03 18:26:28 -08:00
Chris Lu eef877dacb 2.08 2020-11-01 13:04:12 -08:00
Chris Lu 10f6541376
Merge pull request #15 from howardlau1999/test
Add some basic tests
2020-11-01 02:29:38 -08:00
Howard Lau 647b74f909
fix wrong if
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 09:34:52 +00:00
Howard Lau 152049b5cf
fix lint
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 09:32:43 +00:00
Howard Lau 08430619b9
fix lint
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 09:28:08 +00:00
Howard Lau 89bf4e4e0e
Add tests
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 09:24:13 +00:00
Chris Lu 0a09f85cbd
Merge pull request #14 from howardlau1999/webhook
Add defaulting/validating webhook
2020-11-01 01:01:59 -08:00
Howard Lau 6437abce16
less verbose
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 08:50:31 +00:00
Howard Lau 9d134da582
Aggregate errors
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 08:21:01 +00:00
Howard Lau f0d0622b15 Add validation webhook
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 08:16:22 +00:00
Howard Lau 2d2148e96c
Merge workflows
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 07:02:22 +00:00
Howard Lau a1771c26cb
Fix workflow
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 07:00:39 +00:00
Howard Lau 275515dffc
Split test steps
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 06:59:41 +00:00
Howard Lau 7eed56f5eb Use Makefile test
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 06:55:19 +00:00
Howard Lau f9b565e70a
Merge pull request #13 from howardlau1999/fix
Use Kubernetes recommended labels
2020-11-01 14:49:30 +08:00
Howard Lau 753e8e8c00
Add managed-by
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 06:46:34 +00:00
Howard Lau eb051a3202
Use Kubernetes recommended labels
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 06:43:45 +00:00
Howard Lau af2f704fa8
Merge pull request #12 from howardlau1999/fix
Fix make install
2020-11-01 13:02:43 +08:00
Howard Lau 7ccf5efcf1 Merge branch 'master' into fix 2020-11-01 04:40:41 +00:00
Howard Lau 6ae2bba475
Increase lint timeout
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 15:59:29 +00:00
Howard Lau 034a36489d
Add golangci-lint
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 15:55:35 +00:00
Chris Lu 740ac81a57
Merge pull request #11 from howardlau1999/filerpeer
Support Filer Peers
2020-10-30 02:55:13 -07:00
Howard Lau a9ea14b399
fix port
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 09:23:54 +00:00
Howard Lau e8ba79aae6
add filer peers on filer startup
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 09:15:16 +00:00
Howard Lau cc2c614d43
fix compatibility problem
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 09:10:05 +00:00
Chris Lu ec5ccbf5c5
Merge pull request #10 from howardlau1999/peersvc
Add headless peer services for StatefulSet
2020-10-30 01:05:37 -07:00
Howard Lau cbd151aa2a
fix service
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 07:14:24 +00:00
Howard Lau ee1d99b4ab
tidy go.mod go.sum
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 07:11:27 +00:00
Howard Lau 2b486417bb
split comments
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 07:09:39 +00:00
Howard Lau 223d464e45
do not sleep
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 06:57:51 +00:00
Howard Lau 53536d56db
fix name
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 06:57:02 +00:00
Howard Lau 7b11a10862
fix master name
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 06:44:51 +00:00
Howard Lau a8c8fd65bc
add config
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 06:42:00 +00:00
Howard Lau 5bc96e22b1 Merge branch 'master' into peersvc 2020-10-30 05:15:56 +00:00
Howard Lau 0bcbcd1d0b
Add verify codegen and manifests
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 05:15:18 +00:00
Howard Lau 8c390ce083
fix float issues
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 03:09:04 +00:00
Chris Lu 7f453bdf05
Create go.yml 2020-10-29 20:00:50 -07:00
Howard Lau 95b9af71fa
ownerReference for cm
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 02:42:34 +00:00
Howard Lau e7b287bb6f Merge branch 'master' into peersvc 2020-10-30 02:40:34 +00:00
Chris Lu 4d2cd6ce12
Merge pull request #7 from howardlau1999/config
Support raw TOML config for master and filer
2020-10-29 08:52:56 -07:00
Chris Lu 9e229fc647
Merge pull request #9 from howardlau1999/owner
Set ownerReference for GC
2020-10-29 08:51:33 -07:00
Chris Lu 5341512045
Merge pull request #8 from howardlau1999/rbac
Fix RBAC problem and restore metrics
2020-10-29 08:48:58 -07:00
Howard Lau 0d53ed3402
Set ownerReference for GC
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-29 14:31:06 +00:00
Howard Lau a762e9c866 Merge branch 'rbac' into peersvc 2020-10-29 12:00:40 +00:00
Howard Lau c5fbce3700
Make sure the operator is runnable
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-29 11:56:59 +00:00
Howard Lau e8baea6ae4
fix rbac
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-29 11:37:49 +00:00
Howard Lau 92172f43a5
restore metrics
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-29 11:30:45 +00:00
Howard Lau 6c601549f9
rbac
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-29 11:24:38 +00:00
Howard Lau 2c0d8fac02
fix
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-29 09:03:30 +00:00
Howard Lau f8e325e446
create peer svc for sts
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-29 08:57:16 +00:00
Howard Lau 3722dcdc0e
fix mountpath
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-28 11:33:16 +00:00
Howard Lau a4b872fbbc
format
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-28 11:31:02 +00:00
Howard Lau 6ee14a018f
Reconcile ConfigMap
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-28 11:29:11 +00:00
Howard Lau 09d121bcb5
Support raw TOML config for master and filer
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-28 11:19:17 +00:00
Chris Lu c06204b5b6
Merge pull request #5 from howardlau1999/refactor
Use accessor to merge config and extract magic numbers
2020-10-28 00:40:24 -07:00
Howard Lau d6e3cf7be9
use accessor to merge config and extract magic numbers
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-28 07:06:37 +00:00
Chris Lu 1d54f9b9e3
Merge pull request #4 from howardlau1999/refactor
Refactor the CRD to allow users to configure each component separately
2020-10-27 22:16:19 -07:00
Howard Lau ef177093c7
remove unused field
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-28 05:09:01 +00:00
Howard Lau 4f59aa4ada
refactor to multiple specs
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-28 05:06:25 +00:00
Chris Lu 7e57102a24
Update README.md 2020-10-27 00:58:58 -07:00
Chris Lu 32b17068ca
Update README.md 2020-10-27 00:56:59 -07:00
Chris Lu 38c4d034d3
Update README.md 2020-10-27 00:54:25 -07:00
Chris Lu e557a193f5
Update README.md 2020-10-27 00:37:07 -07:00
Chris Lu 9df02c313d 2.07 2020-10-25 22:39:05 -07:00
Chris Lu 937ea45c04 add volume server disks 2020-10-20 00:17:14 -07:00
Chris Lu 80ba5abbae probe volume, filer. 2020-10-18 22:56:12 -07:00
Chris Lu 1a443616b6 master probe 2020-10-18 22:49:11 -07:00
Chris Lu 4b67dd3791 simplify 2020-10-18 01:59:38 -07:00
Chris Lu fb816896d7 disable metrics 2020-10-18 00:58:15 -07:00
Chris Lu 8ad7ee2d50 create ingress, simplify ensure filer statefulset 2020-10-18 00:12:33 -07:00
Chris Lu 9a4df148b8 skip metrics 2020-10-18 00:11:31 -07:00
Chris Lu f734443207 add debug 2020-10-18 00:06:49 -07:00
Chris Lu a5c5e85f2a simplify 2020-10-18 00:06:40 -07:00
Chris Lu 58aa518ba7 simplify 2020-10-17 20:52:13 -07:00
Chris Lu 4df48f08de adjust logs 2020-10-17 14:07:06 -07:00
Chris Lu 95164dc830 add filer nodeport service 2020-10-17 13:51:07 -07:00
Chris Lu b2274f19c1 2.05 2020-10-17 02:42:48 -07:00
Chris Lu 4bb6f99cd8 support upgrade 2020-10-17 02:25:23 -07:00
Chris Lu 4ea680c4c9 fix filer starting 2020-10-17 00:59:38 -07:00
Chris Lu a55feda96d support adjustable volume count and filer count 2020-10-16 23:25:46 -07:00
Chris Lu 30e457b814 merge s3 with filer 2020-10-14 21:53:09 -07:00
Chris Lu c7e0392c1c refactor 2020-10-14 21:45:53 -07:00
53 changed files with 7694 additions and 857 deletions

70
.github/workflows/go.yml vendored Normal file

@@ -0,0 +1,70 @@
name: Go
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
lint:
name: Lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: golangci-lint
uses: golangci/golangci-lint-action@v2
with:
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
version: v1.31
# Optional: working directory, useful for monorepos
# working-directory: somedir
# Optional: golangci-lint command line arguments.
# TODO: remove disabled
args: --timeout=10m -D errcheck -D deadcode -D unused
# Optional: show only new issues if it's a pull request. The default value is `false`.
# only-new-issues: true
build:
name: Build
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: ^1.13
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Get dependencies
run: |
go get -v -t -d ./...
if [ -f Gopkg.toml ]; then
curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
dep ensure
fi
- name: Build
run: go build -v .
test:
name: Test
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: ^1.13
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Test
run: make test SHELL=/bin/bash

74
.github/workflows/main.yml vendored Normal file

@@ -0,0 +1,74 @@
# This is a basic workflow to help you get started with Actions
name: CI
# Controls when the action will run.
on:
# Triggers the workflow on push or pull request events but only for the master branch
push:
branches: [ master ]
pull_request:
branches: [ master ]
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
# This workflow contains a single job called "build"
build:
# The type of runner that the job will run on
runs-on: ubuntu-latest
# Steps represent a sequence of tasks that will be executed as part of the job
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@v2
# Runs a single command using the runners shell
- name: Run a one-line script
run: echo Hello, world!
- name: Build and push Docker images
# You may pin to the exact commit or the version.
# uses: docker/build-push-action@e1b7f96249f2e4c8e4ac1519b9608c0d48944a1f
uses: docker/build-push-action@v2.4.0
with:
# List of extra privileged entitlement (eg. network.host,security.insecure)
allow: # optional
# List of build-time variables
build-args: # optional
# Builder instance
builder: # optional
# Build's context is the set of files located in the specified PATH or URL
context: # optional
# Path to the Dockerfile
file: # optional
# List of metadata for an image
labels: # optional
# Load is a shorthand for --output=type=docker
load: # optional, default is false
# Set the networking mode for the RUN instructions during build
network: # optional
# Do not use cache when building the image
no-cache: # optional, default is false
# List of output destinations (format: type=local,dest=path)
outputs: # optional
# List of target platforms for build
platforms: # optional
# Always attempt to pull a newer version of the image
pull: # optional, default is false
# Push is a shorthand for --output=type=registry
push: # optional, default is false
# List of secrets to expose to the build (eg. key=string, GIT_AUTH_TOKEN=mytoken)
secrets: # optional
# List of secret files to expose to the build (eg. key=filename, MY_SECRET=./secret.txt)
secret-files: # optional
# List of SSH agent socket or keys to expose to the build
ssh: # optional
# List of tags
tags: # optional
# Sets the target stage to build
target: # optional
# GitHub Token used to authenticate against a repository for Git context
github-token: # optional, default is ${{ github.token }}

35
.github/workflows/verify.yml vendored Normal file

@@ -0,0 +1,35 @@
name: Verify
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
verify:
name: Verify
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: ^1.13
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Get dependencies
run: |
go get -v -t -d ./...
if [ -f Gopkg.toml ]; then
curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
dep ensure
fi
- name: Verify Codegen
run: hack/verify-codegen.sh
- name: Verify Manifests
run: hack/verify-manifests.sh

3
.gitignore vendored

@@ -79,3 +79,6 @@ tags
### GoLand ###
.idea
bin/*
## asdf
.tool-versions

11
.travis.yml Normal file

@@ -0,0 +1,11 @@
sudo: false
language: go
go:
- 1.16.x
before_install:
- export PATH=/home/travis/gopath/bin:$PATH
install:
- export CGO_ENABLED="0"
- go env

Dockerfile

@@ -1,5 +1,5 @@
# Build the manager binary
FROM golang:1.13 as builder
FROM golang:1.16 as builder
WORKDIR /workspace
# Copy the Go Modules manifests

201
LICENSE Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016 Chris Lu
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Makefile

@@ -1,5 +1,5 @@
# Current Operator version
VERSION ?= 0.0.1
VERSION ?= v0.0.2
# Default bundle image tag
BUNDLE_IMG ?= controller-bundle:$(VERSION)
# Options for 'bundle-build'
@@ -12,9 +12,9 @@ endif
BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL)
# Image URL to use all building/pushing image targets
IMG ?= controller:latest
IMG ?= gfxlabs/seaweedfs-operator:$(VERSION)
# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
CRD_OPTIONS ?= "crd:trivialVersions=true"
CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false"
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
@@ -26,8 +26,11 @@ endif
all: manager
# Run tests
ENVTEST_ASSETS_DIR=$(shell pwd)/testbin
test: generate fmt vet manifests
go test ./... -coverprofile cover.out
mkdir -p ${ENVTEST_ASSETS_DIR}
test -f ${ENVTEST_ASSETS_DIR}/setup-envtest.sh || curl -sSLo ${ENVTEST_ASSETS_DIR}/setup-envtest.sh https://raw.githubusercontent.com/kubernetes-sigs/controller-runtime/master/hack/setup-envtest.sh
source ${ENVTEST_ASSETS_DIR}/setup-envtest.sh; fetch_envtest_tools $(ENVTEST_ASSETS_DIR); setup_envtest_env $(ENVTEST_ASSETS_DIR); go test ./... -coverprofile cover.out
# Build manager binary
manager: generate fmt vet
@@ -37,6 +40,10 @@ manager: generate fmt vet
run: generate fmt vet manifests
go run ./main.go
debug: generate fmt vet manifests
go build -gcflags="all=-N -l" ./main.go
ENABLE_WEBHOOKS=false dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec main
# Install CRDs into a cluster
install: manifests kustomize
$(KUSTOMIZE) build config/crd | kubectl apply -f -
@@ -50,6 +57,10 @@ deploy: manifests kustomize
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
$(KUSTOMIZE) build config/default | kubectl apply -f -
# clean up crd & controller in the configured Kubernetes cluster in ~/.kube/config
delete: manifests kustomize
$(KUSTOMIZE) build config/default | kubectl delete -f -
# Generate manifests e.g. CRD, RBAC etc.
manifests: controller-gen
$(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
@@ -67,7 +78,8 @@ generate: controller-gen
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
# Build the docker image
docker-build: test
docker-build: # test
echo ${IMG}
docker build . -t ${IMG}
# Push the docker image

PROJECT

@@ -1,6 +1,7 @@
domain: seaweedfs.com
layout: go.kubebuilder.io/v2
repo: github.com/seaweedfs/seaweedfs-operator
projectName: seaweedfs-operator
resources:
- group: seaweed
kind: Seaweed

140
README.md

@@ -1,7 +1,110 @@
[![Build Status](https://travis-ci.com/seaweedfs/seaweedfs-operator.svg?branch=master)](https://travis-ci.com/github/seaweedfs/seaweedfs-operator)
# SeaweedFS Operator
This [Kubernetes Operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) makes it easy to deploy SeaweedFS onto your Kubernetes cluster.
The difference to [seaweedfs-csi-driver](https://github.com/seaweedfs/seaweedfs-csi-driver) is that the SeaweedFS infrastructure itself (master, filer, and volume servers) also runs on Kubernetes and can scale with it as needed. It is also far more resilient to failures, such as crashing services or accidental deletes, than a plain systemd service.
Running `make deploy` deploys the operator itself onto your current kubectl $KUBECONFIG target; by default it does nothing until you configure a resource of type 'Seaweed' (see the examples in config/samples/).
Goals:
- [x] Automatically deploy and manage a SeaweedFS cluster.
- [x] Ability to be managed by other Operators.
- [ ] Compatibility with [seaweedfs-csi-driver](https://github.com/seaweedfs/seaweedfs-csi-driver)
- [x] Auto rolling upgrade and restart.
- [x] Ingress for volume server, filer and S3, to support HDFS, REST filer, S3 API and cross-cluster replication.
- [ ] Support all major cloud Kubernetes: AWS, Google, Azure.
- [ ] Scheduled backup to cloud storage: S3, Google Cloud Storage, Azure.
- [ ] Put warm data to cloud storage tier: S3, Google Cloud Storage, Azure.
- [ ] Grafana dashboard.
## Installation
This operator uses `kustomize` to deploy. The installation process will install one for you if you do not have one.
By default, the defaulting and validation webhooks are disabled. We strongly recommend that the webhooks be enabled.
First clone the repository:
```bash
$ git clone https://github.com/seaweedfs/seaweedfs-operator --depth=1
```
To deploy the operator with webhooks enabled, make sure you have installed `cert-manager` (installation docs: https://cert-manager.io/docs/installation/) in your cluster, then follow the instructions in the `config/default/kustomization.yaml` file to uncomment the components you need.
Lastly, change the value of `ENABLE_WEBHOOKS` to `"true"` in `config/manager/manager.yaml`.
Afterwards fire up:
```bash
$ make install
```
Then run the command to deploy the operator into your cluster:
```bash
$ make deploy
```
Verify if it was correctly deployed with:
```bash
$ kubectl get pods --all-namespaces
```
Which may return:
```bash
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-f9fd979d6-68p4c 1/1 Running 0 34m
kube-system coredns-f9fd979d6-x992t 1/1 Running 0 34m
kube-system etcd-kind-control-plane 1/1 Running 0 34m
kube-system kindnet-rp7wr 1/1 Running 0 34m
kube-system kube-apiserver-kind-control-plane 1/1 Running 0 34m
kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 34m
kube-system kube-proxy-dqfg2 1/1 Running 0 34m
kube-system kube-scheduler-kind-control-plane 1/1 Running 0 34m
local-path-storage local-path-provisioner-78776bfc44-7zvxx 1/1 Running 0 34m
seaweedfs-operator-system seaweedfs-operator-controller-manager-54cc768f4c-cwz2k 2/2 Running 0 34m
```
See the next section for example usage - **__at this point you have only deployed the operator itself!__**
### You also need to deploy a configuration to get it running (see next section)!
## Configuration Examples
- Please send us your use-cases / example configs ... this is currently empty (needs to be written)
- For now see: https://github.com/seaweedfs/seaweedfs-operator/blob/master/config/samples/seaweed_v1_seaweed.yaml
````
apiVersion: seaweed.seaweedfs.com/v1
kind: Seaweed
metadata:
name: seaweed1
namespace: default
spec:
# Add fields here
image: chrislusf/seaweedfs:2.96
volumeServerDiskCount: 1
hostSuffix: seaweed.abcdefg.com
master:
replicas: 3
volumeSizeLimitMB: 1024
volume:
replicas: 1
requests:
storage: 2Gi
filer:
replicas: 2
config: |
[leveldb2]
enabled = true
dir = "/data/filerldb2"
````
## Maintenance and Uninstallation
- TBD
## Development
Follow the instructions in https://sdk.operatorframework.io/docs/building-operators/golang/quickstart/
@@ -10,6 +113,42 @@ Follow the instructions in https://sdk.operatorframework.io/docs/building-operat
$ git clone https://github.com/seaweedfs/seaweedfs-operator
$ cd seaweedfs-operator
# register the CRD with the Kubernetes
$ make deploy
# build the operator image
$ make docker-build
# load the image into Kind cluster
$ kind load docker-image chrislusf/seaweedfs-operator:v0.0.1
# From another terminal in the same directory
$ kubectl apply -f config/samples/seaweed_v1_seaweed.yaml
```
### Update the operator
```
# delete the existing operator
$ kubectl delete namespace seaweedfs-operator-system
# rebuild the operator image
$ make docker-build
# load the image into Kind cluster
$ kind load docker-image chrislusf/seaweedfs-operator:v0.0.1
# register the CRD with the Kubernetes
$ make deploy
```
### develop outside of k8s
```
$ git clone https://github.com/seaweedfs/seaweedfs-operator
$ cd seaweedfs-operator
# register the CRD with the Kubernetes
$ make install
@@ -18,5 +157,4 @@ $ make run ENABLE_WEBHOOKS=false
# From another terminal in the same directory
$ kubectl apply -f config/samples/seaweed_v1_seaweed.yaml
```


@@ -0,0 +1,206 @@
package v1
import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
)
// ComponentAccessor is the interface to access component details, which respects the cluster-level properties
// and component-level overrides
// +kubebuilder:object:root=false
// +kubebuilder:object:generate=false
type ComponentAccessor interface {
ImagePullPolicy() corev1.PullPolicy
ImagePullSecrets() []corev1.LocalObjectReference
HostNetwork() bool
Affinity() *corev1.Affinity
PriorityClassName() *string
NodeSelector() map[string]string
Annotations() map[string]string
Tolerations() []corev1.Toleration
SchedulerName() string
DNSPolicy() corev1.DNSPolicy
BuildPodSpec() corev1.PodSpec
Env() []corev1.EnvVar
TerminationGracePeriodSeconds() *int64
StatefulSetUpdateStrategy() appsv1.StatefulSetUpdateStrategyType
}
type componentAccessorImpl struct {
imagePullPolicy corev1.PullPolicy
imagePullSecrets []corev1.LocalObjectReference
hostNetwork *bool
affinity *corev1.Affinity
priorityClassName *string
schedulerName string
clusterNodeSelector map[string]string
clusterAnnotations map[string]string
tolerations []corev1.Toleration
statefulSetUpdateStrategy appsv1.StatefulSetUpdateStrategyType
// ComponentSpec is the Component Spec
ComponentSpec *ComponentSpec
}
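// StatefulSetUpdateStrategy prefers the component-level strategy, falls back to the cluster-level one, and defaults to RollingUpdate when neither is set.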
func (a *componentAccessorImpl) StatefulSetUpdateStrategy() appsv1.StatefulSetUpdateStrategyType {
strategy := a.ComponentSpec.StatefulSetUpdateStrategy
if len(strategy) != 0 {
return strategy
}
strategy = a.statefulSetUpdateStrategy
if len(strategy) != 0 {
return strategy
}
return appsv1.RollingUpdateStatefulSetStrategyType
}
func (a *componentAccessorImpl) ImagePullPolicy() corev1.PullPolicy {
pp := a.ComponentSpec.ImagePullPolicy
if pp == nil {
return a.imagePullPolicy
}
return *pp
}
func (a *componentAccessorImpl) ImagePullSecrets() []corev1.LocalObjectReference {
ips := a.ComponentSpec.ImagePullSecrets
if ips == nil {
return a.imagePullSecrets
}
return ips
}
func (a *componentAccessorImpl) HostNetwork() bool {
hostNetwork := a.ComponentSpec.HostNetwork
if hostNetwork == nil {
hostNetwork = a.hostNetwork
}
if hostNetwork == nil {
return false
}
return *hostNetwork
}
func (a *componentAccessorImpl) Affinity() *corev1.Affinity {
affi := a.ComponentSpec.Affinity
if affi == nil {
affi = a.affinity
}
return affi
}
func (a *componentAccessorImpl) PriorityClassName() *string {
pcn := a.ComponentSpec.PriorityClassName
if pcn == nil {
pcn = a.priorityClassName
}
return pcn
}
func (a *componentAccessorImpl) SchedulerName() string {
pcn := a.ComponentSpec.SchedulerName
if pcn == nil {
pcn = &a.schedulerName
}
return *pcn
}
func (a *componentAccessorImpl) NodeSelector() map[string]string {
sel := map[string]string{}
for k, v := range a.clusterNodeSelector {
sel[k] = v
}
for k, v := range a.ComponentSpec.NodeSelector {
sel[k] = v
}
return sel
}
func (a *componentAccessorImpl) Annotations() map[string]string {
anno := map[string]string{}
for k, v := range a.clusterAnnotations {
anno[k] = v
}
for k, v := range a.ComponentSpec.Annotations {
anno[k] = v
}
return anno
}
func (a *componentAccessorImpl) Tolerations() []corev1.Toleration {
tols := a.ComponentSpec.Tolerations
if len(tols) == 0 {
tols = a.tolerations
}
return tols
}
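// DNSPolicy returns ClusterFirstWithHostNet when host networking is enabled, otherwise the Kubernetes default ClusterFirst.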
func (a *componentAccessorImpl) DNSPolicy() corev1.DNSPolicy {
dnsPolicy := corev1.DNSClusterFirst // same as kubernetes default
if a.HostNetwork() {
dnsPolicy = corev1.DNSClusterFirstWithHostNet
}
return dnsPolicy
}
func (a *componentAccessorImpl) BuildPodSpec() corev1.PodSpec {
spec := corev1.PodSpec{
SchedulerName: a.SchedulerName(),
Affinity: a.Affinity(),
NodeSelector: a.NodeSelector(),
HostNetwork: a.HostNetwork(),
RestartPolicy: corev1.RestartPolicyAlways,
Tolerations: a.Tolerations(),
}
if a.PriorityClassName() != nil {
spec.PriorityClassName = *a.PriorityClassName()
}
if a.ImagePullSecrets() != nil {
spec.ImagePullSecrets = a.ImagePullSecrets()
}
if a.TerminationGracePeriodSeconds() != nil {
spec.TerminationGracePeriodSeconds = a.TerminationGracePeriodSeconds()
}
return spec
}
func (a *componentAccessorImpl) Env() []corev1.EnvVar {
return a.ComponentSpec.Env
}
func (a *componentAccessorImpl) TerminationGracePeriodSeconds() *int64 {
return a.ComponentSpec.TerminationGracePeriodSeconds
}
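// buildSeaweedComponentAccessor merges the cluster-level defaults from SeaweedSpec with one component's ComponentSpec; component-level values take precedence where both are set.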
func buildSeaweedComponentAccessor(spec *SeaweedSpec, componentSpec *ComponentSpec) ComponentAccessor {
return &componentAccessorImpl{
imagePullPolicy: spec.ImagePullPolicy,
imagePullSecrets: spec.ImagePullSecrets,
hostNetwork: spec.HostNetwork,
affinity: spec.Affinity,
schedulerName: spec.SchedulerName,
clusterNodeSelector: spec.NodeSelector,
clusterAnnotations: spec.Annotations,
tolerations: spec.Tolerations,
statefulSetUpdateStrategy: spec.StatefulSetUpdateStrategy,
ComponentSpec: componentSpec,
}
}
// BaseMasterSpec provides merged spec of masters
func (s *Seaweed) BaseMasterSpec() ComponentAccessor {
return buildSeaweedComponentAccessor(&s.Spec, &s.Spec.Master.ComponentSpec)
}
// BaseFilerSpec provides merged spec of filers
func (s *Seaweed) BaseFilerSpec() ComponentAccessor {
return buildSeaweedComponentAccessor(&s.Spec, &s.Spec.Filer.ComponentSpec)
}
// BaseVolumeSpec provides merged spec of volumes
func (s *Seaweed) BaseVolumeSpec() ComponentAccessor {
return buildSeaweedComponentAccessor(&s.Spec, &s.Spec.Volume.ComponentSpec)
}
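A minimal usage sketch (not part of this diff) of the accessors above, assuming the api/v1 package is importable under the repo path declared in PROJECT; the field values here are illustrative only:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1" // assumed import path
)

func main() {
	always := corev1.PullAlways

	sw := &seaweedv1.Seaweed{
		Spec: seaweedv1.SeaweedSpec{
			ImagePullPolicy: corev1.PullIfNotPresent, // cluster-level default
			SchedulerName:   "default-scheduler",
			Master: &seaweedv1.MasterSpec{
				Replicas: 3,
				ComponentSpec: seaweedv1.ComponentSpec{
					ImagePullPolicy: &always, // component-level override
				},
			},
		},
	}

	acc := sw.BaseMasterSpec()         // merged view of cluster + master settings
	fmt.Println(acc.ImagePullPolicy()) // Always: the component-level override wins
	pod := acc.BuildPodSpec()          // PodSpec seeded with the merged scheduling fields
	fmt.Println(pod.SchedulerName)     // default-scheduler: falls back to the cluster level
}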


@@ -17,12 +17,28 @@ limitations under the License.
package v1
import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// Constants
const (
GRPCPortDelta = 10000
MasterHTTPPort = 9333
VolumeHTTPPort = 8444
FilerHTTPPort = 8888
FilerS3Port = 8333
MasterGRPCPort = MasterHTTPPort + GRPCPortDelta
VolumeGRPCPort = VolumeHTTPPort + GRPCPortDelta
FilerGRPCPort = FilerHTTPPort + GRPCPortDelta
)
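// With GRPCPortDelta = 10000, the derived gRPC ports are 19333 (master), 18444 (volume) and 18888 (filer), next to the HTTP ports 9333, 8444, 8888 and S3 on 8333.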
// SeaweedSpec defines the desired state of Seaweed
type SeaweedSpec struct {
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
@@ -31,14 +47,60 @@ type SeaweedSpec struct {
// MetricsAddress is Prometheus gateway address
MetricsAddress string `json:"metricsAddress,omitempty"`
// VolumeServerCount is the number of volume servers, default to 1
VolumeServerCount int `json:"volumeServerCount,omitempty"`
// Image
Image string `json:"image,omitempty"`
// FilerCount is the number of filers, default to 1
FilerCount int `json:"filerCount,omitempty"`
// Version
Version string `json:"version,omitempty"`
// S3Count is the number of s3, default to 1
S3Count int `json:"s3Count,omitempty"`
// Master
Master *MasterSpec `json:"master,omitempty"`
// Volume
Volume *VolumeSpec `json:"volume,omitempty"`
// Filer
Filer *FilerSpec `json:"filer,omitempty"`
// SchedulerName of pods
SchedulerName string `json:"schedulerName,omitempty"`
// Persistent volume reclaim policy
PVReclaimPolicy *corev1.PersistentVolumeReclaimPolicy `json:"pvReclaimPolicy,omitempty"`
// ImagePullPolicy of pods
ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images.
ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
// Whether enable PVC reclaim for orphan PVC left by statefulset scale-in
EnablePVReclaim *bool `json:"enablePVReclaim,omitempty"`
// Whether Hostnetwork is enabled for pods
HostNetwork *bool `json:"hostNetwork,omitempty"`
// Affinity of pods
Affinity *corev1.Affinity `json:"affinity,omitempty"`
// Base node selectors of Pods, components may add or override selectors upon this respectively
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// Base annotations of Pods, components may add or override selectors upon this respectively
Annotations map[string]string `json:"annotations,omitempty"`
// Base tolerations of Pods, components may add more tolerations upon this respectively
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
// StatefulSetUpdateStrategy indicates the StatefulSetUpdateStrategy that will be
// employed to update Pods in the StatefulSet when a revision is made to
// Template.
StatefulSetUpdateStrategy appsv1.StatefulSetUpdateStrategyType `json:"statefulSetUpdateStrategy,omitempty"`
VolumeServerDiskCount int32 `json:"volumeServerDiskCount,omitempty"`
// Ingresses
HostSuffix *string `json:"hostSuffix,omitempty"`
}
// SeaweedStatus defines the observed state of Seaweed
@@ -47,6 +109,140 @@ type SeaweedStatus struct {
// Important: Run "make" to regenerate code after modifying this file
}
// MasterSpec is the spec for masters
type MasterSpec struct {
ComponentSpec `json:",inline"`
corev1.ResourceRequirements `json:",inline"`
// The desired ready replicas
// +kubebuilder:validation:Minimum=1
Replicas int32 `json:"replicas"`
Service *ServiceSpec `json:"service,omitempty"`
// Config in raw toml string
Config *string `json:"config,omitempty"`
// Master-specific settings
VolumePreallocate *bool `json:"volumePreallocate,omitempty"`
VolumeSizeLimitMB *int32 `json:"volumeSizeLimitMB,omitempty"`
GarbageThreshold *string `json:"garbageThreshold,omitempty"`
PulseSeconds *int32 `json:"pulseSeconds,omitempty"`
DefaultReplication *string `json:"defaultReplication,omitempty"`
// only for testing
ConcurrentStart *bool `json:"concurrentStart,omitempty"`
}
// VolumeSpec is the spec for volume servers
type VolumeSpec struct {
ComponentSpec `json:",inline"`
corev1.ResourceRequirements `json:",inline"`
// The desired ready replicas
// +kubebuilder:validation:Minimum=1
Replicas int32 `json:"replicas"`
Service *ServiceSpec `json:"service,omitempty"`
StorageClassName *string `json:"storageClassName,omitempty"`
// Volume-specific settings
CompactionMBps *int32 `json:"compactionMBps,omitempty"`
FileSizeLimitMB *int32 `json:"fileSizeLimitMB,omitempty"`
FixJpgOrientation *bool `json:"fixJpgOrientation,omitempty"`
IdleTimeout *int32 `json:"idleTimeout,omitempty"`
MaxVolumeCounts *int32 `json:"maxVolumeCounts,omitempty"`
MinFreeSpacePercent *int32 `json:"minFreeSpacePercent,omitempty"`
}
// FilerSpec is the spec for filers
type FilerSpec struct {
ComponentSpec `json:",inline"`
corev1.ResourceRequirements `json:",inline"`
// The desired ready replicas
// +kubebuilder:validation:Minimum=1
Replicas int32 `json:"replicas"`
Service *ServiceSpec `json:"service,omitempty"`
// Config in raw toml string
Config *string `json:"config,omitempty"`
// Filer-specific settings
MaxMB *int32 `json:"maxMB,omitempty"`
}
// ComponentSpec is the base spec of each component; its fields should always be accessed via the Base<Component>Spec() methods to respect the cluster-level properties
type ComponentSpec struct {
// Version of the component. Override the cluster-level version if non-empty
Version *string `json:"version,omitempty"`
// ImagePullPolicy of the component. Override the cluster-level imagePullPolicy if present
ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images.
ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
// Whether Hostnetwork of the component is enabled. Override the cluster-level setting if present
HostNetwork *bool `json:"hostNetwork,omitempty"`
// Affinity of the component. Override the cluster-level one if present
Affinity *corev1.Affinity `json:"affinity,omitempty"`
// PriorityClassName of the component. Override the cluster-level one if present
PriorityClassName *string `json:"priorityClassName,omitempty"`
// SchedulerName of the component. Override the cluster-level one if present
SchedulerName *string `json:"schedulerName,omitempty"`
// NodeSelector of the component. Merged into the cluster-level nodeSelector if non-empty
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// Annotations of the component. Merged into the cluster-level annotations if non-empty
Annotations map[string]string `json:"annotations,omitempty"`
// Tolerations of the component. Override the cluster-level tolerations if non-empty
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
// List of environment variables to set in the container, like
// v1.Container.Env.
// Note that the following env names cannot be used and may be overridden by the operator
// - NAMESPACE
// - POD_IP
// - POD_NAME
Env []corev1.EnvVar `json:"env,omitempty"`
// Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
// Value must be non-negative integer. The value zero indicates delete immediately.
// If this value is nil, the default grace period will be used instead.
// The grace period is the duration in seconds after the processes running in the pod are sent
// a termination signal and the time when the processes are forcibly halted with a kill signal.
// Set this value longer than the expected cleanup time for your process.
// Defaults to 30 seconds.
TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
// StatefulSetUpdateStrategy indicates the StatefulSetUpdateStrategy that will be
// employed to update Pods in the StatefulSet when a revision is made to
// Template.
StatefulSetUpdateStrategy appsv1.StatefulSetUpdateStrategyType `json:"statefulSetUpdateStrategy,omitempty"`
}
// ServiceSpec is a subset of the original k8s spec
type ServiceSpec struct {
// Type of the real kubernetes service
Type corev1.ServiceType `json:"type,omitempty"`
// Additional annotations of the kubernetes service object
Annotations map[string]string `json:"annotations,omitempty"`
// LoadBalancerIP is the loadBalancerIP of service
LoadBalancerIP *string `json:"loadBalancerIP,omitempty"`
// ClusterIP is the clusterIP of service
ClusterIP *string `json:"clusterIP,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status

93
api/v1/seaweed_webhook.go Normal file
View File

@ -0,0 +1,93 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
// log is for logging in this package.
var seaweedlog = logf.Log.WithName("seaweed-resource")
func (r *Seaweed) SetupWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).
For(r).
Complete()
}
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// +kubebuilder:webhook:path=/mutate-seaweed-seaweedfs-com-v1-seaweed,mutating=true,failurePolicy=fail,groups=seaweed.seaweedfs.com,resources=seaweeds,verbs=create;update,versions=v1,name=mseaweed.kb.io
var _ webhook.Defaulter = &Seaweed{}
// Default implements webhook.Defaulter so a webhook will be registered for the type
func (r *Seaweed) Default() {
seaweedlog.Info("default", "name", r.Name)
// TODO(user): fill in your defaulting logic.
}
// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
// +kubebuilder:webhook:verbs=create;update,path=/validate-seaweed-seaweedfs-com-v1-seaweed,mutating=false,failurePolicy=fail,groups=seaweed.seaweedfs.com,resources=seaweeds,versions=v1,name=vseaweed.kb.io
var _ webhook.Validator = &Seaweed{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
func (r *Seaweed) ValidateCreate() error {
seaweedlog.Info("validate create", "name", r.Name)
errs := []error{}
// TODO(user): fill in your validation logic upon object creation.
if r.Spec.Master == nil {
errs = append(errs, errors.New("missing master spec"))
}
if r.Spec.Volume == nil {
errs = append(errs, errors.New("missing volume spec"))
} else {
if r.Spec.Volume.Requests[corev1.ResourceStorage].Equal(resource.MustParse("0")) {
errs = append(errs, errors.New("volume storage request cannot be zero"))
}
}
return utilerrors.NewAggregate(errs)
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
func (r *Seaweed) ValidateUpdate(old runtime.Object) error {
seaweedlog.Info("validate update", "name", r.Name)
// TODO(user): fill in your validation logic upon object update.
return nil
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
func (r *Seaweed) ValidateDelete() error {
seaweedlog.Info("validate delete", "name", r.Name)
// TODO(user): fill in your validation logic upon object deletion.
return nil
}
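ValidateCreate rejects a Seaweed object that has no master spec, no volume spec, or a volume spec whose storage request is zero, and aggregates all failures into one error. A minimal test sketch of that behavior is shown below; the field names are taken from this diff, but the full Seaweed, MasterSpec and VolumeSpec definitions are not visible here, so treat the exact shapes as assumptions:

package v1_test

import (
	"testing"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)

// TestValidateCreate is a sketch only: it exercises the rules shown in this diff.
func TestValidateCreate(t *testing.T) {
	sw := &seaweedv1.Seaweed{}
	if err := sw.ValidateCreate(); err == nil {
		t.Fatal("expected an error for a spec with no master and no volume")
	}

	sw.Spec.Master = &seaweedv1.MasterSpec{}
	sw.Spec.Volume = &seaweedv1.VolumeSpec{}
	// VolumeSpec embeds corev1.ResourceRequirements, so Requests is a promoted field.
	sw.Spec.Volume.Requests = corev1.ResourceList{
		corev1.ResourceStorage: resource.MustParse("2Gi"),
	}
	if err := sw.ValidateCreate(); err != nil {
		t.Fatalf("expected a valid spec, got: %v", err)
	}
}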

View File

@ -1,3 +1,4 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
@ -21,15 +22,188 @@ limitations under the License.
package v1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ComponentSpec) DeepCopyInto(out *ComponentSpec) {
*out = *in
if in.Version != nil {
in, out := &in.Version, &out.Version
*out = new(string)
**out = **in
}
if in.ImagePullPolicy != nil {
in, out := &in.ImagePullPolicy, &out.ImagePullPolicy
*out = new(corev1.PullPolicy)
**out = **in
}
if in.ImagePullSecrets != nil {
in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
*out = make([]corev1.LocalObjectReference, len(*in))
copy(*out, *in)
}
if in.HostNetwork != nil {
in, out := &in.HostNetwork, &out.HostNetwork
*out = new(bool)
**out = **in
}
if in.Affinity != nil {
in, out := &in.Affinity, &out.Affinity
*out = new(corev1.Affinity)
(*in).DeepCopyInto(*out)
}
if in.PriorityClassName != nil {
in, out := &in.PriorityClassName, &out.PriorityClassName
*out = new(string)
**out = **in
}
if in.SchedulerName != nil {
in, out := &in.SchedulerName, &out.SchedulerName
*out = new(string)
**out = **in
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Annotations != nil {
in, out := &in.Annotations, &out.Annotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]corev1.Toleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Env != nil {
in, out := &in.Env, &out.Env
*out = make([]corev1.EnvVar, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.TerminationGracePeriodSeconds != nil {
in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
*out = new(int64)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentSpec.
func (in *ComponentSpec) DeepCopy() *ComponentSpec {
if in == nil {
return nil
}
out := new(ComponentSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FilerSpec) DeepCopyInto(out *FilerSpec) {
*out = *in
in.ComponentSpec.DeepCopyInto(&out.ComponentSpec)
in.ResourceRequirements.DeepCopyInto(&out.ResourceRequirements)
if in.Service != nil {
in, out := &in.Service, &out.Service
*out = new(ServiceSpec)
(*in).DeepCopyInto(*out)
}
if in.Config != nil {
in, out := &in.Config, &out.Config
*out = new(string)
**out = **in
}
if in.MaxMB != nil {
in, out := &in.MaxMB, &out.MaxMB
*out = new(int32)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilerSpec.
func (in *FilerSpec) DeepCopy() *FilerSpec {
if in == nil {
return nil
}
out := new(FilerSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MasterSpec) DeepCopyInto(out *MasterSpec) {
*out = *in
in.ComponentSpec.DeepCopyInto(&out.ComponentSpec)
in.ResourceRequirements.DeepCopyInto(&out.ResourceRequirements)
if in.Service != nil {
in, out := &in.Service, &out.Service
*out = new(ServiceSpec)
(*in).DeepCopyInto(*out)
}
if in.Config != nil {
in, out := &in.Config, &out.Config
*out = new(string)
**out = **in
}
if in.VolumePreallocate != nil {
in, out := &in.VolumePreallocate, &out.VolumePreallocate
*out = new(bool)
**out = **in
}
if in.VolumeSizeLimitMB != nil {
in, out := &in.VolumeSizeLimitMB, &out.VolumeSizeLimitMB
*out = new(int32)
**out = **in
}
if in.GarbageThreshold != nil {
in, out := &in.GarbageThreshold, &out.GarbageThreshold
*out = new(string)
**out = **in
}
if in.PulseSeconds != nil {
in, out := &in.PulseSeconds, &out.PulseSeconds
*out = new(int32)
**out = **in
}
if in.DefaultReplication != nil {
in, out := &in.DefaultReplication, &out.DefaultReplication
*out = new(string)
**out = **in
}
if in.ConcurrentStart != nil {
in, out := &in.ConcurrentStart, &out.ConcurrentStart
*out = new(bool)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterSpec.
func (in *MasterSpec) DeepCopy() *MasterSpec {
if in == nil {
return nil
}
out := new(MasterSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Seaweed) DeepCopyInto(out *Seaweed) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
}
@ -86,6 +260,72 @@ func (in *SeaweedList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SeaweedSpec) DeepCopyInto(out *SeaweedSpec) {
*out = *in
if in.Master != nil {
in, out := &in.Master, &out.Master
*out = new(MasterSpec)
(*in).DeepCopyInto(*out)
}
if in.Volume != nil {
in, out := &in.Volume, &out.Volume
*out = new(VolumeSpec)
(*in).DeepCopyInto(*out)
}
if in.Filer != nil {
in, out := &in.Filer, &out.Filer
*out = new(FilerSpec)
(*in).DeepCopyInto(*out)
}
if in.PVReclaimPolicy != nil {
in, out := &in.PVReclaimPolicy, &out.PVReclaimPolicy
*out = new(corev1.PersistentVolumeReclaimPolicy)
**out = **in
}
if in.ImagePullSecrets != nil {
in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
*out = make([]corev1.LocalObjectReference, len(*in))
copy(*out, *in)
}
if in.EnablePVReclaim != nil {
in, out := &in.EnablePVReclaim, &out.EnablePVReclaim
*out = new(bool)
**out = **in
}
if in.HostNetwork != nil {
in, out := &in.HostNetwork, &out.HostNetwork
*out = new(bool)
**out = **in
}
if in.Affinity != nil {
in, out := &in.Affinity, &out.Affinity
*out = new(corev1.Affinity)
(*in).DeepCopyInto(*out)
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Annotations != nil {
in, out := &in.Annotations, &out.Annotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]corev1.Toleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.HostSuffix != nil {
in, out := &in.HostSuffix, &out.HostSuffix
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeaweedSpec.
@ -112,3 +352,92 @@ func (in *SeaweedStatus) DeepCopy() *SeaweedStatus {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
*out = *in
if in.Annotations != nil {
in, out := &in.Annotations, &out.Annotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.LoadBalancerIP != nil {
in, out := &in.LoadBalancerIP, &out.LoadBalancerIP
*out = new(string)
**out = **in
}
if in.ClusterIP != nil {
in, out := &in.ClusterIP, &out.ClusterIP
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec.
func (in *ServiceSpec) DeepCopy() *ServiceSpec {
if in == nil {
return nil
}
out := new(ServiceSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeSpec) DeepCopyInto(out *VolumeSpec) {
*out = *in
in.ComponentSpec.DeepCopyInto(&out.ComponentSpec)
in.ResourceRequirements.DeepCopyInto(&out.ResourceRequirements)
if in.Service != nil {
in, out := &in.Service, &out.Service
*out = new(ServiceSpec)
(*in).DeepCopyInto(*out)
}
if in.StorageClassName != nil {
in, out := &in.StorageClassName, &out.StorageClassName
*out = new(string)
**out = **in
}
if in.CompactionMBps != nil {
in, out := &in.CompactionMBps, &out.CompactionMBps
*out = new(int32)
**out = **in
}
if in.FileSizeLimitMB != nil {
in, out := &in.FileSizeLimitMB, &out.FileSizeLimitMB
*out = new(int32)
**out = **in
}
if in.FixJpgOrientation != nil {
in, out := &in.FixJpgOrientation, &out.FixJpgOrientation
*out = new(bool)
**out = **in
}
if in.IdleTimeout != nil {
in, out := &in.IdleTimeout, &out.IdleTimeout
*out = new(int32)
**out = **in
}
if in.MaxVolumeCounts != nil {
in, out := &in.MaxVolumeCounts, &out.MaxVolumeCounts
*out = new(int32)
**out = **in
}
if in.MinFreeSpacePercent != nil {
in, out := &in.MinFreeSpacePercent, &out.MinFreeSpacePercent
*out = new(int32)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSpec.
func (in *VolumeSpec) DeepCopy() *VolumeSpec {
if in == nil {
return nil
}
out := new(VolumeSpec)
in.DeepCopyInto(out)
return out
}
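These generated DeepCopy/DeepCopyInto methods are what let the controller mutate Seaweed objects safely: objects returned by the client may be shared with the informer cache, so a deep copy should be taken before changing them. A minimal sketch, relying only on the generated methods above and the embedded ObjectMeta:

package main

import (
	"fmt"

	seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)

func main() {
	original := &seaweedv1.Seaweed{}
	original.Name = "seaweed1"

	// Deep-copy before mutating, so the (possibly cached) original stays untouched.
	updated := original.DeepCopy()
	updated.Labels = map[string]string{"touched": "true"}

	fmt.Println(len(original.Labels), len(updated.Labels)) // 0 1
}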

View File

@ -1,8 +1,7 @@
# The following manifests contain a self-signed issuer CR and a certificate CR.
# More document can be found at https://docs.cert-manager.io
# WARNING: Targets CertManager 0.11 check https://docs.cert-manager.io/en/latest/tasks/upgrading/index.html for
# breaking changes
apiVersion: cert-manager.io/v1alpha2
# WARNING: Targets CertManager 1.7
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: selfsigned-issuer
@ -10,7 +9,7 @@ metadata:
spec:
selfSigned: {}
---
apiVersion: cert-manager.io/v1alpha2
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml

File diff suppressed because it is too large

View File

@ -6,15 +6,8 @@ resources:
# +kubebuilder:scaffold:crdkustomizeresource
patchesStrategicMerge:
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
# patches here are for enabling the conversion webhook for each CRD
#- patches/webhook_in_seaweeds.yaml
# +kubebuilder:scaffold:crdkustomizewebhookpatch
# [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix.
# patches here are for enabling the CA injection for each CRD
#- patches/cainjection_in_seaweeds.yaml
# +kubebuilder:scaffold:crdkustomizecainjectionpatch
- patches/webhook_in_seaweeds.yaml
- patches/cainjection_in_seaweeds.yaml
# the following config is for teaching kustomize how to do kustomization for CRDs.
configurations:

View File

@ -16,55 +16,42 @@ bases:
- ../crd
- ../rbac
- ../manager
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- ../webhook
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
#- ../certmanager
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
#- ../prometheus
- ../webhook
- ../certmanager
- ../prometheus
patchesStrategicMerge:
# Protect the /metrics endpoint by putting it behind auth.
# If you want your controller-manager to expose the /metrics
# endpoint w/o any authn/z, please comment the following line.
- manager_auth_proxy_patch.yaml
- manager_webhook_patch.yaml
- webhookcainjection_patch.yaml
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- manager_webhook_patch.yaml
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
# 'CERTMANAGER' needs to be enabled to use ca injection
#- webhookcainjection_patch.yaml
# the following config is for teaching kustomize how to do var substitution
vars:
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
# objref:
# kind: Certificate
# group: cert-manager.io
# version: v1alpha2
# name: serving-cert # this name should match the one in certificate.yaml
# fieldref:
# fieldpath: metadata.namespace
#- name: CERTIFICATE_NAME
# objref:
# kind: Certificate
# group: cert-manager.io
# version: v1alpha2
# name: serving-cert # this name should match the one in certificate.yaml
#- name: SERVICE_NAMESPACE # namespace of the service
# objref:
# kind: Service
# version: v1
# name: webhook-service
# fieldref:
# fieldpath: metadata.namespace
#- name: SERVICE_NAME
# objref:
# kind: Service
# version: v1
# name: webhook-service
- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
objref:
kind: Certificate
group: cert-manager.io
version: v1alpha2
name: serving-cert # this name should match the one in certificate.yaml
fieldref:
fieldpath: metadata.namespace
- name: CERTIFICATE_NAME
objref:
kind: Certificate
group: cert-manager.io
version: v1alpha2
name: serving-cert # this name should match the one in certificate.yaml
- name: SERVICE_NAMESPACE # namespace of the service
objref:
kind: Service
version: v1
name: webhook-service
fieldref:
fieldpath: metadata.namespace
- name: SERVICE_NAME
objref:
kind: Service
version: v1
name: webhook-service

View File

@ -1,2 +1,8 @@
resources:
- manager.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
images:
- name: controller
newName: chrislusf/seaweedfs-operator
newTag: v0.0.1

View File

@ -28,12 +28,15 @@ spec:
args:
- --enable-leader-election
image: controller:latest
env:
- name: ENABLE_WEBHOOKS
value: "true"
name: manager
resources:
limits:
cpu: 100m
memory: 30Mi
memory: 100Mi
requests:
cpu: 100m
memory: 20Mi
memory: 50Mi
terminationGracePeriodSeconds: 10
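The ENABLE_WEBHOOKS environment variable added to the manager here pairs with the usual kubebuilder pattern in main.go, where webhook registration is skipped only when the variable is explicitly set to "false". The operator's main.go is not part of this excerpt, so the following is just a sketch of that conventional wiring, assuming the SetupWebhookWithManager method shown earlier:

package hooks

import (
	"os"

	ctrl "sigs.k8s.io/controller-runtime"

	seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)

// setupWebhooks registers the Seaweed webhooks unless they are explicitly disabled.
func setupWebhooks(mgr ctrl.Manager) error {
	if os.Getenv("ENABLE_WEBHOOKS") == "false" {
		return nil
	}
	return (&seaweedv1.Seaweed{}).SetupWebhookWithManager(mgr)
}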

View File

@ -6,6 +6,73 @@ metadata:
creationTimestamp: null
name: manager-role
rules:
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- apiGroups:
- ""
resources:
- services
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- seaweed.seaweedfs.com
resources:

View File

@ -2,6 +2,22 @@ apiVersion: seaweed.seaweedfs.com/v1
kind: Seaweed
metadata:
name: seaweed1
namespace: default
spec:
# Add fields here
foo: bar
image: chrislusf/seaweedfs:2.96
volumeServerDiskCount: 1
hostSuffix: seaweed.abcdefg.com
master:
replicas: 3
volumeSizeLimitMB: 1024
volume:
replicas: 1
requests:
storage: 2Gi
filer:
replicas: 2
config: |
[leveldb2]
enabled = true
dir = "/data/filerldb2"

View File

@ -0,0 +1,54 @@
---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
creationTimestamp: null
name: mutating-webhook-configuration
webhooks:
- clientConfig:
caBundle: Cg==
service:
name: webhook-service
namespace: system
path: /mutate-seaweed-seaweedfs-com-v1-seaweed
failurePolicy: Fail
name: mseaweed.kb.io
rules:
- apiGroups:
- seaweed.seaweedfs.com
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- seaweeds
timeoutSeconds: 15
---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
creationTimestamp: null
name: validating-webhook-configuration
webhooks:
- clientConfig:
caBundle: Cg==
service:
name: webhook-service
namespace: system
path: /validate-seaweed-seaweedfs-com-v1-seaweed
failurePolicy: Fail
name: vseaweed.kb.io
rules:
- apiGroups:
- seaweed.seaweedfs.com
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- seaweeds
timeoutSeconds: 15

View File

@ -3,80 +3,106 @@ package controllers
import (
"context"
"k8s.io/apimachinery/pkg/runtime"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
label "github.com/seaweedfs/seaweedfs-operator/controllers/label"
)
func (r *SeaweedReconciler) ensureFilerServers(seaweedCR *seaweedv1.Seaweed) (done bool, result ctrl.Result, err error) {
_ = context.Background()
_ = r.Log.WithValues("seaweed", seaweedCR.Name)
if done, result, err = r.ensureFilerStatefulSet(seaweedCR); done {
return done, result, err
if done, result, err = r.ensureFilerPeerService(seaweedCR); done {
return
}
if done, result, err = r.ensureFilerService(seaweedCR); done {
return done, result, err
return
}
return false, ctrl.Result{}, nil
if done, result, err = r.ensureFilerConfigMap(seaweedCR); done {
return
}
if done, result, err = r.ensureFilerStatefulSet(seaweedCR); done {
return
}
return
}
func (r *SeaweedReconciler) ensureFilerStatefulSet(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
ctx := context.Background()
log := r.Log.WithValues("sw-filer-statefulset", seaweedCR.Name)
filerStatefulSet := &appsv1.StatefulSet{}
err := r.Get(ctx, types.NamespacedName{Name: seaweedCR.Name + "-filer", Namespace: seaweedCR.Namespace}, filerStatefulSet)
if err != nil && errors.IsNotFound(err) {
// Define a new deployment
dep := r.createFilerStatefulSet(seaweedCR)
log.Info("Creating a new filer statefulset", "Namespace", dep.Namespace, "Name", dep.Name)
err = r.Create(ctx, dep)
if err != nil {
log.Error(err, "Failed to create new filer statefulset", "Namespace", dep.Namespace, "Name", dep.Name)
return true, ctrl.Result{}, err
}
// Deployment created successfully - return and requeue
return false, ctrl.Result{}, nil
} else if err != nil {
log.Error(err, "Failed to get filer statefulset")
return true, ctrl.Result{}, err
filerStatefulSet := r.createFilerStatefulSet(seaweedCR)
if err := controllerutil.SetControllerReference(seaweedCR, filerStatefulSet, r.Scheme); err != nil {
return ReconcileResult(err)
}
log.Info("Get filer stateful set " + filerStatefulSet.Name)
return false, ctrl.Result{}, nil
_, err := r.CreateOrUpdate(filerStatefulSet, func(existing, desired runtime.Object) error {
existingStatefulSet := existing.(*appsv1.StatefulSet)
desiredStatefulSet := desired.(*appsv1.StatefulSet)
existingStatefulSet.Spec.Replicas = desiredStatefulSet.Spec.Replicas
existingStatefulSet.Spec.Template.Spec = desiredStatefulSet.Spec.Template.Spec
return nil
})
log.Info("ensure filer stateful set " + filerStatefulSet.Name)
return ReconcileResult(err)
}
func (r *SeaweedReconciler) ensureFilerPeerService(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
log := r.Log.WithValues("sw-filer-peer-service", seaweedCR.Name)
filerPeerService := r.createFilerPeerService(seaweedCR)
if err := controllerutil.SetControllerReference(seaweedCR, filerPeerService, r.Scheme); err != nil {
return ReconcileResult(err)
}
_, err := r.CreateOrUpdateService(filerPeerService)
log.Info("ensure filer peer service " + filerPeerService.Name)
return ReconcileResult(err)
}
func (r *SeaweedReconciler) ensureFilerService(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
ctx := context.Background()
log := r.Log.WithValues("sw-filer-service", seaweedCR.Name)
volumeServerService := &corev1.Service{}
err := r.Get(ctx, types.NamespacedName{Name: seaweedCR.Name + "-filer", Namespace: seaweedCR.Namespace}, volumeServerService)
if err != nil && errors.IsNotFound(err) {
// Define a new deployment
dep := r.createFilerService(seaweedCR)
log.Info("Creating a new filer service", "Namespace", dep.Namespace, "Name", dep.Name)
err = r.Create(ctx, dep)
if err != nil {
log.Error(err, "Failed to create new filer service", "Namespace", dep.Namespace, "Name", dep.Name)
return true, ctrl.Result{}, err
}
// Deployment created successfully - return and requeue
return false, ctrl.Result{}, nil
} else if err != nil {
log.Error(err, "Failed to get filer server service")
return true, ctrl.Result{}, err
filerService := r.createFilerService(seaweedCR)
if err := controllerutil.SetControllerReference(seaweedCR, filerService, r.Scheme); err != nil {
return ReconcileResult(err)
}
log.Info("Get filer service " + volumeServerService.Name)
return false, ctrl.Result{}, nil
_, err := r.CreateOrUpdateService(filerService)
log.Info("ensure filer service " + filerService.Name)
return ReconcileResult(err)
}
func (r *SeaweedReconciler) ensureFilerConfigMap(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
log := r.Log.WithValues("sw-filer-configmap", seaweedCR.Name)
filerConfigMap := r.createFilerConfigMap(seaweedCR)
if err := controllerutil.SetControllerReference(seaweedCR, filerConfigMap, r.Scheme); err != nil {
return ReconcileResult(err)
}
_, err := r.CreateOrUpdateConfigMap(filerConfigMap)
log.Info("Get filer ConfigMap " + filerConfigMap.Name)
return ReconcileResult(err)
}
func labelsForFiler(name string) map[string]string {
return map[string]string{"app": "seaweedfs", "role": "filer", "name": name}
return map[string]string{
label.ManagedByLabelKey: "seaweedfs-operator",
label.NameLabelKey: "seaweedfs",
label.ComponentLabelKey: "filer",
label.InstanceLabelKey: name,
}
}
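The labelsForFiler map doubles as the pod labels and as the service/StatefulSet selector, so the same helper can be reused to list the filer pods that belong to one Seaweed instance. A short sketch (not part of the operator) assuming the controller-runtime client used elsewhere in this diff:

package controllers

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listFilerPods shows how the filer labels can act as a list selector for a
// cluster named name in namespace ns.
func listFilerPods(ctx context.Context, c client.Client, ns, name string) (*corev1.PodList, error) {
	pods := &corev1.PodList{}
	err := c.List(ctx, pods,
		client.InNamespace(ns),
		client.MatchingLabels(labelsForFiler(name)),
	)
	return pods, err
}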

View File

@ -0,0 +1,29 @@
package controllers
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
func (r *SeaweedReconciler) createFilerConfigMap(m *seaweedv1.Seaweed) *corev1.ConfigMap {
labels := labelsForFiler(m.Name)
toml := ""
if m.Spec.Filer.Config != nil {
toml = *m.Spec.Filer.Config
}
dep := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: m.Name + "-filer",
Namespace: m.Namespace,
Labels: labels,
},
Data: map[string]string{
"filer.toml": toml,
},
}
return dep
}

View File

@ -0,0 +1,85 @@
package controllers
import (
"fmt"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
ctrl "sigs.k8s.io/controller-runtime"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
func (r *SeaweedReconciler) createAllIngress(m *seaweedv1.Seaweed) *extensionsv1beta1.Ingress {
labels := labelsForIngress(m.Name)
dep := &extensionsv1beta1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: m.Name + "-ingress",
Namespace: m.Namespace,
Labels: labels,
},
Spec: extensionsv1beta1.IngressSpec{
// TLS: ingressSpec.TLS,
Rules: []extensionsv1beta1.IngressRule{
{
Host: "filer." + *m.Spec.HostSuffix,
IngressRuleValue: extensionsv1beta1.IngressRuleValue{
HTTP: &extensionsv1beta1.HTTPIngressRuleValue{
Paths: []extensionsv1beta1.HTTPIngressPath{
{
Path: "/",
Backend: extensionsv1beta1.IngressBackend{
ServiceName: m.Name + "-filer",
ServicePort: intstr.FromInt(seaweedv1.FilerHTTPPort),
},
},
},
},
},
},
{
Host: "s3." + *m.Spec.HostSuffix,
IngressRuleValue: extensionsv1beta1.IngressRuleValue{
HTTP: &extensionsv1beta1.HTTPIngressRuleValue{
Paths: []extensionsv1beta1.HTTPIngressPath{
{
Path: "/",
Backend: extensionsv1beta1.IngressBackend{
ServiceName: m.Name + "-filer",
ServicePort: intstr.FromInt(seaweedv1.FilerS3Port),
},
},
},
},
},
},
},
},
}
// add ingress for volume servers
for i := 0; i < int(m.Spec.Volume.Replicas); i++ {
dep.Spec.Rules = append(dep.Spec.Rules, extensionsv1beta1.IngressRule{
Host: fmt.Sprintf("%s-volume-%d.%s", m.Name, i, *m.Spec.HostSuffix),
IngressRuleValue: extensionsv1beta1.IngressRuleValue{
HTTP: &extensionsv1beta1.HTTPIngressRuleValue{
Paths: []extensionsv1beta1.HTTPIngressPath{
{
Path: "/",
Backend: extensionsv1beta1.IngressBackend{
ServiceName: fmt.Sprintf("%s-volume-%d", m.Name, i),
ServicePort: intstr.FromInt(seaweedv1.VolumeHTTPPort),
},
},
},
},
},
})
}
// Set master instance as the owner and controller
ctrl.SetControllerReference(m, dep, r.Scheme)
return dep
}

View File

@ -8,6 +8,47 @@ import (
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
func (r *SeaweedReconciler) createFilerPeerService(m *seaweedv1.Seaweed) *corev1.Service {
labels := labelsForFiler(m.Name)
dep := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: m.Name + "-filer-peer",
Namespace: m.Namespace,
Labels: labels,
Annotations: map[string]string{
"service.alpha.kubernetes.io/tolerate-unready-endpoints": "true",
},
},
Spec: corev1.ServiceSpec{
ClusterIP: "None",
PublishNotReadyAddresses: true,
Ports: []corev1.ServicePort{
{
Name: "filer-http",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.FilerHTTPPort,
TargetPort: intstr.FromInt(seaweedv1.FilerHTTPPort),
},
{
Name: "filer-grpc",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.FilerGRPCPort,
TargetPort: intstr.FromInt(seaweedv1.FilerGRPCPort),
},
{
Name: "filer-s3",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.FilerS3Port,
TargetPort: intstr.FromInt(seaweedv1.FilerS3Port),
},
},
Selector: labels,
},
}
return dep
}
func (r *SeaweedReconciler) createFilerService(m *seaweedv1.Seaweed) *corev1.Service {
labels := labelsForFiler(m.Name)
@ -21,30 +62,47 @@ func (r *SeaweedReconciler) createFilerService(m *seaweedv1.Seaweed) *corev1.Ser
},
},
Spec: corev1.ServiceSpec{
ClusterIP: "None",
Type: corev1.ServiceTypeClusterIP,
PublishNotReadyAddresses: true,
Ports: []corev1.ServicePort{
{
Name: "swfs-filer",
Protocol: corev1.Protocol("TCP"),
Port: 8888,
TargetPort: intstr.IntOrString{
Type: intstr.Int,
IntVal: 8888,
},
Name: "filer-http",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.FilerHTTPPort,
TargetPort: intstr.FromInt(seaweedv1.FilerHTTPPort),
},
{
Name: "swfs-volume-grpc",
Protocol: corev1.Protocol("TCP"),
Port: 18888,
TargetPort: intstr.IntOrString{
Type: intstr.Int,
IntVal: 18888,
},
Name: "filer-grpc",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.FilerGRPCPort,
TargetPort: intstr.FromInt(seaweedv1.FilerGRPCPort),
},
{
Name: "filer-s3",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.FilerS3Port,
TargetPort: intstr.FromInt(seaweedv1.FilerS3Port),
},
},
Selector: labels,
},
}
if m.Spec.Filer.Service != nil {
svcSpec := m.Spec.Filer.Service
dep.Annotations = copyAnnotations(svcSpec.Annotations)
if svcSpec.Type != "" {
dep.Spec.Type = svcSpec.Type
}
if svcSpec.ClusterIP != nil {
dep.Spec.ClusterIP = *svcSpec.ClusterIP
}
if svcSpec.LoadBalancerIP != nil {
dep.Spec.LoadBalancerIP = *svcSpec.LoadBalancerIP
}
}
return dep
}

View File

@ -2,27 +2,114 @@ package controllers
import (
"fmt"
"strings"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
func buildFilerStartupScript(m *seaweedv1.Seaweed) string {
commands := []string{"weed", "-logtostderr=true", "filer"}
commands = append(commands, fmt.Sprintf("-port=%d", seaweedv1.FilerHTTPPort))
commands = append(commands, fmt.Sprintf("-ip=$(POD_NAME).%s-filer-peer.%s", m.Name, m.Namespace))
commands = append(commands, fmt.Sprintf("-master=%s", getMasterPeersString(m)))
commands = append(commands, "-s3")
return strings.Join(commands, " ")
}
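For a concrete feel of what buildFilerStartupScript emits, the sketch below reproduces the joined command for a hypothetical cluster named seaweed1 in namespace default. The -master value is only an assumption about the output of getMasterPeersString, which is defined elsewhere in the repository and not shown in this diff:

package main

import (
	"fmt"
	"strings"
)

func main() {
	name, namespace := "seaweed1", "default"
	cmd := strings.Join([]string{
		"weed", "-logtostderr=true", "filer",
		"-port=8888", // seaweedv1.FilerHTTPPort
		fmt.Sprintf("-ip=$(POD_NAME).%s-filer-peer.%s", name, namespace),
		// assumed peer-list shape; the real value comes from getMasterPeersString(m)
		fmt.Sprintf("-master=%s-master-0.%s-master-peer.%s:9333", name, name, namespace),
		"-s3",
	}, " ")
	fmt.Println(cmd)
}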
func (r *SeaweedReconciler) createFilerStatefulSet(m *seaweedv1.Seaweed) *appsv1.StatefulSet {
labels := labelsForFiler(m.Name)
replicas := int32(m.Spec.FilerCount)
replicas := int32(m.Spec.Filer.Replicas)
rollingUpdatePartition := int32(0)
enableServiceLinks := false
filerPodSpec := m.BaseFilerSpec().BuildPodSpec()
filerPodSpec.Volumes = []corev1.Volume{
{
Name: "filer-config",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: m.Name + "-filer",
},
},
},
},
}
filerPodSpec.EnableServiceLinks = &enableServiceLinks
filerPodSpec.Containers = []corev1.Container{{
Name: "filer",
Image: m.Spec.Image,
ImagePullPolicy: m.BaseFilerSpec().ImagePullPolicy(),
Env: append(m.BaseFilerSpec().Env(), kubernetesEnvVars...),
VolumeMounts: []corev1.VolumeMount{
{
Name: "filer-config",
ReadOnly: true,
MountPath: "/etc/seaweedfs",
},
},
Command: []string{
"/bin/sh",
"-ec",
buildFilerStartupScript(m),
},
Ports: []corev1.ContainerPort{
{
ContainerPort: seaweedv1.FilerHTTPPort,
Name: "filer-http",
},
{
ContainerPort: seaweedv1.FilerGRPCPort,
Name: "filer-grpc",
},
{
ContainerPort: seaweedv1.FilerS3Port,
Name: "filer-s3",
},
},
ReadinessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/",
Port: intstr.FromInt(seaweedv1.FilerHTTPPort),
Scheme: corev1.URISchemeHTTP,
},
},
InitialDelaySeconds: 10,
TimeoutSeconds: 3,
PeriodSeconds: 15,
SuccessThreshold: 1,
FailureThreshold: 100,
},
LivenessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/",
Port: intstr.FromInt(seaweedv1.FilerHTTPPort),
Scheme: corev1.URISchemeHTTP,
},
},
InitialDelaySeconds: 20,
TimeoutSeconds: 3,
PeriodSeconds: 30,
SuccessThreshold: 1,
FailureThreshold: 6,
},
}}
dep := &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: m.Name + "-filer",
Namespace: m.Namespace,
},
Spec: appsv1.StatefulSetSpec{
ServiceName: m.Name + "-filer",
ServiceName: m.Name + "-filer-peer",
PodManagementPolicy: appsv1.ParallelPodManagement,
Replicas: &replicas,
UpdateStrategy: appsv1.StatefulSetUpdateStrategy{
@ -38,95 +125,7 @@ func (r *SeaweedReconciler) createFilerStatefulSet(m *seaweedv1.Seaweed) *appsv1
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: corev1.PodSpec{
EnableServiceLinks: &enableServiceLinks,
Containers: []corev1.Container{{
Name: "seaweedfs",
Image: "chrislusf/seaweedfs:latest",
ImagePullPolicy: corev1.PullIfNotPresent,
Env: []corev1.EnvVar{
{
Name: "POD_IP",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "status.podIP",
},
},
},
{
Name: "POD_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.name",
},
},
},
{
Name: "NAMESPACE",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.namespace",
},
},
},
},
Command: []string{
"/bin/sh",
"-ec",
fmt.Sprintf("weed filer -port=8888 %s %s",
fmt.Sprintf("-ip=$(POD_NAME).%s-filer", m.Name),
fmt.Sprintf("-peers=%s-master-0.%s-master:9333,%s-master-1.%s-master:9333,%s-master-2.%s-master:9333",
m.Name, m.Name, m.Name, m.Name, m.Name, m.Name),
),
},
Ports: []corev1.ContainerPort{
{
ContainerPort: 8888,
Name: "swfs-filer",
},
{
ContainerPort: 18888,
},
},
/*
ReadinessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/cluster/status",
Port: intstr.IntOrString{
Type: 0,
IntVal: 9333,
},
Scheme: "http",
},
},
InitialDelaySeconds: 5,
TimeoutSeconds: 0,
PeriodSeconds: 15,
SuccessThreshold: 2,
FailureThreshold: 100,
},
LivenessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/cluster/status",
Port: intstr.IntOrString{
Type: 0,
IntVal: 9333,
},
Scheme: "http",
},
},
InitialDelaySeconds: 20,
TimeoutSeconds: 0,
PeriodSeconds: 10,
SuccessThreshold: 1,
FailureThreshold: 6,
},
*/
}},
},
Spec: filerPodSpec,
},
},
}

View File

@ -0,0 +1,42 @@
package controllers
import (
"github.com/seaweedfs/seaweedfs-operator/controllers/label"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
func (r *SeaweedReconciler) ensureSeaweedIngress(seaweedCR *seaweedv1.Seaweed) (done bool, result ctrl.Result, err error) {
if seaweedCR.Spec.HostSuffix != nil && len(*seaweedCR.Spec.HostSuffix) != 0 {
if done, result, err = r.ensureAllIngress(seaweedCR); done {
return
}
}
return
}
func (r *SeaweedReconciler) ensureAllIngress(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
log := r.Log.WithValues("sw-ingress", seaweedCR.Name)
ingressService := r.createAllIngress(seaweedCR)
if err := controllerutil.SetControllerReference(seaweedCR, ingressService, r.Scheme); err != nil {
return ReconcileResult(err)
}
_, err := r.CreateOrUpdateIngress(ingressService)
log.Info("ensure ingress " + ingressService.Name)
return ReconcileResult(err)
}
func labelsForIngress(name string) map[string]string {
return map[string]string{
label.ManagedByLabelKey: "seaweedfs-operator",
label.NameLabelKey: "seaweedfs",
label.ComponentLabelKey: "ingress",
label.InstanceLabelKey: name,
}
}

View File

@ -6,85 +6,146 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
const (
MasterClusterSize = 3
"github.com/seaweedfs/seaweedfs-operator/controllers/label"
)
func (r *SeaweedReconciler) ensureMaster(seaweedCR *seaweedv1.Seaweed) (done bool, result ctrl.Result, err error) {
_ = context.Background()
_ = r.Log.WithValues("seaweed", seaweedCR.Name)
if done, result, err = r.ensureMasterPeerService(seaweedCR); done {
return
}
if done, result, err = r.ensureMasterService(seaweedCR); done {
return done, result, err
return
}
if done, result, err = r.ensureMasterConfigMap(seaweedCR); done {
return
}
if done, result, err = r.ensureMasterStatefulSet(seaweedCR); done {
return done, result, err
return
}
return false, ctrl.Result{}, nil
if seaweedCR.Spec.Master.ConcurrentStart == nil || !*seaweedCR.Spec.Master.ConcurrentStart {
if done, result, err = r.waitForMasterStatefulSet(seaweedCR); done {
return
}
}
return
}
func (r *SeaweedReconciler) waitForMasterStatefulSet(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
log := r.Log.WithValues("sw-master-statefulset", seaweedCR.Name)
podList := &corev1.PodList{}
listOpts := []client.ListOption{
client.InNamespace(seaweedCR.Namespace),
client.MatchingLabels(labelsForMaster(seaweedCR.Name)),
}
if err := r.List(context.Background(), podList, listOpts...); err != nil {
log.Error(err, "Failed to list master pods", "namespace", seaweedCR.Namespace, "name", seaweedCR.Name)
return true, ctrl.Result{RequeueAfter: 3 * time.Second}, nil
}
log.Info("pods", "count", len(podList.Items))
runningCounter := 0
for _, pod := range podList.Items {
if pod.Status.Phase == corev1.PodRunning {
for _, containerStatus := range pod.Status.ContainerStatuses {
if containerStatus.Ready {
runningCounter++
}
log.Info("pod", "name", pod.Name, "containerStatus", containerStatus)
}
} else {
log.Info("pod", "name", pod.Name, "status", pod.Status)
}
}
if runningCounter < int(seaweedCR.Spec.Master.Replicas)/2+1 {
log.Info("some masters are not ready", "missing", int(seaweedCR.Spec.Master.Replicas)-runningCounter)
return true, ctrl.Result{RequeueAfter: 3 * time.Second}, nil
}
log.Info("masters are ready")
return ReconcileResult(nil)
}
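The readiness check above waits only for a majority of master pods, int(Replicas)/2+1, before continuing, which lines up with the majority quorum that SeaweedFS masters (which use Raft leader election) need. A tiny worked example of that arithmetic:

package main

import "fmt"

func main() {
	for _, replicas := range []int32{1, 3, 5} {
		quorum := int(replicas)/2 + 1
		fmt.Printf("replicas=%d -> ready masters required=%d\n", replicas, quorum)
	}
	// replicas=1 -> 1, replicas=3 -> 2, replicas=5 -> 3
}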
func (r *SeaweedReconciler) ensureMasterStatefulSet(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
ctx := context.Background()
log := r.Log.WithValues("sw-master-statefulset", seaweedCR.Name)
masterStatefulSet := &appsv1.StatefulSet{}
err := r.Get(ctx, types.NamespacedName{Name: seaweedCR.Name + "-master", Namespace: seaweedCR.Namespace}, masterStatefulSet)
if err != nil && errors.IsNotFound(err) {
// Define a new deployment
dep := r.createMasterStatefulSet(seaweedCR)
log.Info("Creating a new master statefulset", "Namespace", dep.Namespace, "Name", dep.Name)
err = r.Create(ctx, dep)
if err != nil {
log.Error(err, "Failed to create new statefulset", "Namespace", dep.Namespace, "Name", dep.Name)
return true, ctrl.Result{}, err
}
// sleep 60 seconds for DNS to have pod IP addresses ready
time.Sleep(time.Minute)
// Deployment created successfully - return and requeue
return false, ctrl.Result{}, nil
} else if err != nil {
log.Error(err, "Failed to get Deployment")
return true, ctrl.Result{}, err
masterStatefulSet := r.createMasterStatefulSet(seaweedCR)
if err := controllerutil.SetControllerReference(seaweedCR, masterStatefulSet, r.Scheme); err != nil {
return ReconcileResult(err)
}
log.Info("Get master stateful set " + masterStatefulSet.Name)
return false, ctrl.Result{}, nil
_, err := r.CreateOrUpdate(masterStatefulSet, func(existing, desired runtime.Object) error {
existingStatefulSet := existing.(*appsv1.StatefulSet)
desiredStatefulSet := desired.(*appsv1.StatefulSet)
existingStatefulSet.Spec.Replicas = desiredStatefulSet.Spec.Replicas
existingStatefulSet.Spec.Template.Spec = desiredStatefulSet.Spec.Template.Spec
return nil
})
log.Info("ensure master stateful set " + masterStatefulSet.Name)
return ReconcileResult(err)
}
func (r *SeaweedReconciler) ensureMasterConfigMap(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
log := r.Log.WithValues("sw-master-configmap", seaweedCR.Name)
masterConfigMap := r.createMasterConfigMap(seaweedCR)
if err := controllerutil.SetControllerReference(seaweedCR, masterConfigMap, r.Scheme); err != nil {
return ReconcileResult(err)
}
_, err := r.CreateOrUpdateConfigMap(masterConfigMap)
log.Info("Get master ConfigMap " + masterConfigMap.Name)
return ReconcileResult(err)
}
func (r *SeaweedReconciler) ensureMasterService(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
ctx := context.Background()
log := r.Log.WithValues("sw-master-service", seaweedCR.Name)
masterService := &corev1.Service{}
err := r.Get(ctx, types.NamespacedName{Name: seaweedCR.Name + "-master", Namespace: seaweedCR.Namespace}, masterService)
if err != nil && errors.IsNotFound(err) {
// Define a new deployment
dep := r.createMasterService(seaweedCR)
log.Info("Creating a new master service", "Namespace", dep.Namespace, "Name", dep.Name)
err = r.Create(ctx, dep)
if err != nil {
log.Error(err, "Failed to create master service", "Namespace", dep.Namespace, "Name", dep.Name)
return true, ctrl.Result{}, err
}
// Deployment created successfully - return and requeue
return false, ctrl.Result{}, nil
} else if err != nil {
log.Error(err, "Failed to get master service", "Namespace", seaweedCR.Namespace, "Name", seaweedCR.Name+"-master")
return true, ctrl.Result{}, err
masterService := r.createMasterService(seaweedCR)
if err := controllerutil.SetControllerReference(seaweedCR, masterService, r.Scheme); err != nil {
return ReconcileResult(err)
}
_, err := r.CreateOrUpdateService(masterService)
log.Info("Get master service " + masterService.Name)
return false, ctrl.Result{}, nil
return ReconcileResult(err)
}
func (r *SeaweedReconciler) ensureMasterPeerService(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
log := r.Log.WithValues("sw-master-peer-service", seaweedCR.Name)
masterPeerService := r.createMasterPeerService(seaweedCR)
if err := controllerutil.SetControllerReference(seaweedCR, masterPeerService, r.Scheme); err != nil {
return ReconcileResult(err)
}
_, err := r.CreateOrUpdateService(masterPeerService)
log.Info("Get master peer service " + masterPeerService.Name)
return ReconcileResult(err)
}
func labelsForMaster(name string) map[string]string {
return map[string]string{"app": "seaweedfs", "role": "master", "name": name}
return map[string]string{
label.ManagedByLabelKey: "seaweedfs-operator",
label.NameLabelKey: "seaweedfs",
label.ComponentLabelKey: "master",
label.InstanceLabelKey: name,
}
}

View File

@ -0,0 +1,31 @@
package controllers
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
func (r *SeaweedReconciler) createMasterConfigMap(m *seaweedv1.Seaweed) *corev1.ConfigMap {
labels := labelsForMaster(m.Name)
toml := ""
if m.Spec.Master.Config != nil {
toml = *m.Spec.Master.Config
}
dep := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: m.Name + "-master",
Namespace: m.Namespace,
Labels: labels,
},
Data: map[string]string{
"master.toml": toml,
},
}
// Set master instance as the owner and controller
// ctrl.SetControllerReference(m, dep, r.Scheme)
return dep
}

View File

@ -8,6 +8,43 @@ import (
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
func (r *SeaweedReconciler) createMasterPeerService(m *seaweedv1.Seaweed) *corev1.Service {
labels := labelsForMaster(m.Name)
dep := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: m.Name + "-master-peer",
Namespace: m.Namespace,
Labels: labels,
Annotations: map[string]string{
"service.alpha.kubernetes.io/tolerate-unready-endpoints": "true",
},
},
Spec: corev1.ServiceSpec{
ClusterIP: "None",
PublishNotReadyAddresses: true,
Ports: []corev1.ServicePort{
{
Name: "master-http",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.MasterHTTPPort,
TargetPort: intstr.FromInt(seaweedv1.MasterHTTPPort),
},
{
Name: "master-grpc",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.MasterGRPCPort,
TargetPort: intstr.FromInt(seaweedv1.MasterGRPCPort),
},
},
Selector: labels,
},
}
// Set master instance as the owner and controller
// ctrl.SetControllerReference(m, dep, r.Scheme)
return dep
}
func (r *SeaweedReconciler) createMasterService(m *seaweedv1.Seaweed) *corev1.Service {
labels := labelsForMaster(m.Name)
@ -21,32 +58,40 @@ func (r *SeaweedReconciler) createMasterService(m *seaweedv1.Seaweed) *corev1.Se
},
},
Spec: corev1.ServiceSpec{
ClusterIP: "None",
PublishNotReadyAddresses: true,
Ports: []corev1.ServicePort{
{
Name: "swfs-master",
Protocol: corev1.Protocol("TCP"),
Port: 9333,
TargetPort: intstr.IntOrString{
Type: intstr.Int,
IntVal: 9333,
},
Name: "master-http",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.MasterHTTPPort,
TargetPort: intstr.FromInt(seaweedv1.MasterHTTPPort),
},
{
Name: "swfs-master-grpc",
Protocol: corev1.Protocol("TCP"),
Port: 19333,
TargetPort: intstr.IntOrString{
Type: intstr.Int,
IntVal: 19333,
},
Name: "master-grpc",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.MasterGRPCPort,
TargetPort: intstr.FromInt(seaweedv1.MasterGRPCPort),
},
},
Selector: labels,
},
}
// Set master instance as the owner and controller
// ctrl.SetControllerReference(m, dep, r.Scheme)
if m.Spec.Master.Service != nil {
svcSpec := m.Spec.Master.Service
dep.Annotations = copyAnnotations(svcSpec.Annotations)
if svcSpec.Type != "" {
dep.Spec.Type = svcSpec.Type
}
if svcSpec.ClusterIP != nil {
dep.Spec.ClusterIP = *svcSpec.ClusterIP
}
if svcSpec.LoadBalancerIP != nil {
dep.Spec.LoadBalancerIP = *svcSpec.LoadBalancerIP
}
}
return dep
}

View File

@ -2,27 +2,128 @@ package controllers
import (
"fmt"
"strings"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
func buildMasterStartupScript(m *seaweedv1.Seaweed) string {
command := []string{"weed", "-logtostderr=true", "master"}
spec := m.Spec.Master
if spec.VolumePreallocate != nil && *spec.VolumePreallocate {
command = append(command, "-volumePreallocate")
}
if spec.VolumeSizeLimitMB != nil {
command = append(command, fmt.Sprintf("-volumeSizeLimitMB=%d", *spec.VolumeSizeLimitMB))
}
if spec.GarbageThreshold != nil {
command = append(command, fmt.Sprintf("-garbageThreshold=%s", *spec.GarbageThreshold))
}
if spec.PulseSeconds != nil {
command = append(command, fmt.Sprintf("-pulseSeconds=%d", *spec.PulseSeconds))
}
if spec.DefaultReplication != nil {
command = append(command, fmt.Sprintf("-defaultReplication=%s", *spec.DefaultReplication))
}
command = append(command, fmt.Sprintf("-ip=$(POD_NAME).%s-master-peer.%s", m.Name, m.Namespace))
command = append(command, fmt.Sprintf("-peers=%s", getMasterPeersString(m)))
return strings.Join(command, " ")
}
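buildMasterStartupScript only adds a flag when the corresponding MasterSpec field is set, so a sparse spec yields a short command line and the weed binary's own defaults apply for the rest. The sketch below mimics that conditional assembly with plain values for a hypothetical spec that sets only volumeSizeLimitMB and defaultReplication:

package main

import "fmt"

func main() {
	// hypothetical MasterSpec values; nil fields are simply skipped by the builder
	limit, repl := int32(1024), "001"
	volumeSizeLimitMB, defaultReplication := &limit, &repl

	cmd := []string{"weed", "-logtostderr=true", "master"}
	if volumeSizeLimitMB != nil {
		cmd = append(cmd, fmt.Sprintf("-volumeSizeLimitMB=%d", *volumeSizeLimitMB))
	}
	if defaultReplication != nil {
		cmd = append(cmd, fmt.Sprintf("-defaultReplication=%s", *defaultReplication))
	}
	fmt.Println(cmd)
	// [weed -logtostderr=true master -volumeSizeLimitMB=1024 -defaultReplication=001]
}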
func (r *SeaweedReconciler) createMasterStatefulSet(m *seaweedv1.Seaweed) *appsv1.StatefulSet {
labels := labelsForMaster(m.Name)
replicas := int32(MasterClusterSize)
replicas := m.Spec.Master.Replicas
rollingUpdatePartition := int32(0)
enableServiceLinks := false
masterPodSpec := m.BaseMasterSpec().BuildPodSpec()
masterPodSpec.Volumes = []corev1.Volume{
{
Name: "master-config",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: m.Name + "-master",
},
},
},
},
}
masterPodSpec.EnableServiceLinks = &enableServiceLinks
masterPodSpec.Containers = []corev1.Container{{
Name: "master",
Image: m.Spec.Image,
ImagePullPolicy: m.BaseMasterSpec().ImagePullPolicy(),
Env: append(m.BaseMasterSpec().Env(), kubernetesEnvVars...),
VolumeMounts: []corev1.VolumeMount{
{
Name: "master-config",
ReadOnly: true,
MountPath: "/etc/seaweedfs",
},
},
Command: []string{
"/bin/sh",
"-ec",
buildMasterStartupScript(m),
},
Ports: []corev1.ContainerPort{
{
ContainerPort: seaweedv1.MasterHTTPPort,
Name: "master-http",
},
{
ContainerPort: seaweedv1.MasterGRPCPort,
Name: "master-grpc",
},
},
ReadinessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/cluster/status",
Port: intstr.FromInt(seaweedv1.MasterHTTPPort),
Scheme: corev1.URISchemeHTTP,
},
},
InitialDelaySeconds: 5,
TimeoutSeconds: 15,
PeriodSeconds: 15,
SuccessThreshold: 2,
FailureThreshold: 100,
},
LivenessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/cluster/status",
Port: intstr.FromInt(seaweedv1.MasterHTTPPort),
Scheme: corev1.URISchemeHTTP,
},
},
InitialDelaySeconds: 15,
TimeoutSeconds: 15,
PeriodSeconds: 15,
SuccessThreshold: 1,
FailureThreshold: 6,
},
}}
dep := &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: m.Name + "-master",
Namespace: m.Namespace,
},
Spec: appsv1.StatefulSetSpec{
ServiceName: m.Name + "-master",
ServiceName: m.Name + "-master-peer",
PodManagementPolicy: appsv1.ParallelPodManagement,
Replicas: &replicas,
UpdateStrategy: appsv1.StatefulSetUpdateStrategy{
@ -38,109 +139,7 @@ func (r *SeaweedReconciler) createMasterStatefulSet(m *seaweedv1.Seaweed) *appsv
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: corev1.PodSpec{
/*
Affinity: &corev1.Affinity{
PodAntiAffinity: &corev1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchLabels: labels,
},
TopologyKey: "kubernetes.io/hostname",
},
},
},
},
*/
EnableServiceLinks: &enableServiceLinks,
Containers: []corev1.Container{{
Name: "seaweedfs",
Image: "chrislusf/seaweedfs:latest",
ImagePullPolicy: corev1.PullIfNotPresent,
Env: []corev1.EnvVar{
{
Name: "POD_IP",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "status.podIP",
},
},
},
{
Name: "POD_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.name",
},
},
},
{
Name: "NAMESPACE",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.namespace",
},
},
},
},
Command: []string{
"/bin/sh",
"-ec",
fmt.Sprintf("sleep 60; weed master -volumePreallocate -volumeSizeLimitMB=1000 %s %s",
fmt.Sprintf("-ip=$(POD_NAME).%s-master", m.Name),
fmt.Sprintf("-peers=%s-master-0.%s-master:9333,%s-master-1.%s-master:9333,%s-master-2.%s-master:9333",
m.Name, m.Name, m.Name, m.Name, m.Name, m.Name),
),
},
Ports: []corev1.ContainerPort{
{
ContainerPort: 9333,
Name: "swfs-master",
},
{
ContainerPort: 19333,
},
},
/*
ReadinessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/cluster/status",
Port: intstr.IntOrString{
Type: 0,
IntVal: 9333,
},
Scheme: "http",
},
},
InitialDelaySeconds: 5,
TimeoutSeconds: 0,
PeriodSeconds: 15,
SuccessThreshold: 2,
FailureThreshold: 100,
},
LivenessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/cluster/status",
Port: intstr.IntOrString{
Type: 0,
IntVal: 9333,
},
Scheme: "http",
},
},
InitialDelaySeconds: 20,
TimeoutSeconds: 0,
PeriodSeconds: 10,
SuccessThreshold: 1,
FailureThreshold: 6,
},
*/
}},
},
Spec: masterPodSpec,
},
},
}

View File

@ -1,82 +0,0 @@
package controllers
import (
"context"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
func (r *SeaweedReconciler) ensureS3Servers(seaweedCR *seaweedv1.Seaweed) (done bool, result ctrl.Result, err error) {
_ = context.Background()
_ = r.Log.WithValues("seaweed", seaweedCR.Name)
if done, result, err = r.ensureS3Deployment(seaweedCR); done {
return done, result, err
}
if done, result, err = r.ensureS3Service(seaweedCR); done {
return done, result, err
}
return false, ctrl.Result{}, nil
}
func (r *SeaweedReconciler) ensureS3Deployment(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
ctx := context.Background()
log := r.Log.WithValues("sw-s3-statefulset", seaweedCR.Name)
s3Deployment := &appsv1.Deployment{}
err := r.Get(ctx, types.NamespacedName{Name: seaweedCR.Name + "-s3", Namespace: seaweedCR.Namespace}, s3Deployment)
if err != nil && errors.IsNotFound(err) {
// Define a new deployment
dep := r.createS3Deployment(seaweedCR)
log.Info("Creating a new s3 deployment", "Namespace", dep.Namespace, "Name", dep.Name)
err = r.Create(ctx, dep)
if err != nil {
log.Error(err, "Failed to create new s3 statefulset", "Namespace", dep.Namespace, "Name", dep.Name)
return true, ctrl.Result{}, err
}
// Deployment created successfully - return and requeue
return false, ctrl.Result{}, nil
} else if err != nil {
log.Error(err, "Failed to get s3 statefulset")
return true, ctrl.Result{}, err
}
log.Info("Get s3 stateful set " + s3Deployment.Name)
return false, ctrl.Result{}, nil
}
func (r *SeaweedReconciler) ensureS3Service(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
ctx := context.Background()
log := r.Log.WithValues("sw-filer-service", seaweedCR.Name)
s3Service := &corev1.Service{}
err := r.Get(ctx, types.NamespacedName{Name: seaweedCR.Name + "-s3", Namespace: seaweedCR.Namespace}, s3Service)
if err != nil && errors.IsNotFound(err) {
// Define a new deployment
dep := r.createS3Service(seaweedCR)
log.Info("Creating a new s3 service", "Namespace", dep.Namespace, "Name", dep.Name)
err = r.Create(ctx, dep)
if err != nil {
log.Error(err, "Failed to create new s3 service", "Namespace", dep.Namespace, "Name", dep.Name)
return true, ctrl.Result{}, err
}
// Deployment created successfully - return and requeue
return false, ctrl.Result{}, nil
} else if err != nil {
log.Error(err, "Failed to get s3 server service")
return true, ctrl.Result{}, err
}
log.Info("Get s3 service " + s3Service.Name)
return false, ctrl.Result{}, nil
}
func labelsForS3(name string) map[string]string {
return map[string]string{"app": "seaweedfs", "role": "s3", "name": name}
}

View File

@ -1,123 +0,0 @@
package controllers
import (
"fmt"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
func (r *SeaweedReconciler) createS3Deployment(m *seaweedv1.Seaweed) *appsv1.Deployment {
labels := labelsForS3(m.Name)
replicas := int32(m.Spec.S3Count)
enableServiceLinks := false
dep := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: m.Name + "-s3",
Namespace: m.Namespace,
},
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: corev1.PodSpec{
EnableServiceLinks: &enableServiceLinks,
Containers: []corev1.Container{{
Name: "seaweedfs",
Image: "chrislusf/seaweedfs:latest",
ImagePullPolicy: corev1.PullIfNotPresent,
Env: []corev1.EnvVar{
{
Name: "POD_IP",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "status.podIP",
},
},
},
{
Name: "POD_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.name",
},
},
},
{
Name: "NAMESPACE",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.namespace",
},
},
},
},
Command: []string{
"/bin/sh",
"-ec",
fmt.Sprintf("weed s3 -port=8333 %s",
fmt.Sprintf("-filer=$(POD_NAME).%s-filer:8888", m.Name),
),
},
Ports: []corev1.ContainerPort{
{
ContainerPort: 8333,
Name: "swfs-s3",
},
{
ContainerPort: 18333,
},
},
/*
ReadinessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/cluster/status",
Port: intstr.IntOrString{
Type: 0,
IntVal: 9333,
},
Scheme: "http",
},
},
InitialDelaySeconds: 5,
TimeoutSeconds: 0,
PeriodSeconds: 15,
SuccessThreshold: 2,
FailureThreshold: 100,
},
LivenessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/cluster/status",
Port: intstr.IntOrString{
Type: 0,
IntVal: 9333,
},
Scheme: "http",
},
},
InitialDelaySeconds: 20,
TimeoutSeconds: 0,
PeriodSeconds: 10,
SuccessThreshold: 1,
FailureThreshold: 6,
},
*/
}},
},
},
},
}
return dep
}

View File

@ -1,36 +0,0 @@
package controllers
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
func (r *SeaweedReconciler) createS3Service(m *seaweedv1.Seaweed) *corev1.Service {
labels := labelsForS3(m.Name)
dep := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: m.Name + "-s3",
Namespace: m.Namespace,
Labels: labels,
},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Name: "swfs-s3",
Protocol: corev1.Protocol("TCP"),
Port: 8333,
TargetPort: intstr.IntOrString{
Type: intstr.Int,
IntVal: 8333,
},
},
},
Selector: labels,
},
}
return dep
}

View File

@ -0,0 +1,328 @@
package controllers
import (
"context"
"encoding/json"
"fmt"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/klog"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// the following is adapted from tidb-operator/pkg/controller/generic_control.go
const (
// LastAppliedPodTemplate is annotation key of the last applied pod template
LastAppliedPodTemplate = "seaweedfs.com/last-applied-podtemplate"
// LastAppliedConfigAnnotation is annotation key of last applied configuration
LastAppliedConfigAnnotation = "seaweedfs.com/last-applied-configuration"
)
// MergeFn is used to resolve conflicts between the existing object and the desired one
type MergeFn func(existing, desired runtime.Object) error
// CreateOrUpdate creates an object in the Kubernetes cluster for the controller; if the object already exists,
// mergeFn is called to merge the desired changes into the existing object, and the existing object is then updated.
// The object will also be adopted by the given controller.
func (r *SeaweedReconciler) CreateOrUpdate(obj runtime.Object, mergeFn MergeFn) (runtime.Object, error) {
// controller-runtime/client mutates the object pointer in place;
// to stay consistent with the other methods in our controller, we copy the object
// to avoid the in-place mutation here and below.
desired := obj.DeepCopyObject()
// 1. try to create and see if there are any conflicts
err := r.Create(context.TODO(), desired)
if errors.IsAlreadyExists(err) {
// 2. the object already exists, so merge our desired changes into it
existing, err := EmptyClone(obj)
if err != nil {
return nil, err
}
key, err := client.ObjectKeyFromObject(existing)
if err != nil {
return nil, err
}
// 3. fetch the latest version of the existing object from the cluster
err = r.Get(context.TODO(), key, existing)
if err != nil {
return nil, err
}
mutated := existing.DeepCopyObject()
// 4. invoke mergeFn to mutate a copy of the existing object
if err := mergeFn(mutated, desired); err != nil {
return nil, err
}
// 5. check if the copy is actually mutated
if !apiequality.Semantic.DeepEqual(existing, mutated) {
err := r.Update(context.TODO(), mutated)
return mutated, err
}
return mutated, nil
}
return desired, err
}
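
A minimal usage sketch of CreateOrUpdate (a hypothetical Secret helper, for illustration only; the concrete wrappers this change actually adds follow below). The point is that the MergeFn copies only the fields the controller owns, so values set by other actors or by API-server defaulting on the existing object survive the update:

func (r *SeaweedReconciler) createOrUpdateSecretExample(desired *corev1.Secret) (*corev1.Secret, error) {
	result, err := r.CreateOrUpdate(desired, func(existing, want runtime.Object) error {
		existingSecret := existing.(*corev1.Secret)
		desiredSecret := want.(*corev1.Secret)
		existingSecret.Labels = desiredSecret.Labels
		existingSecret.Data = desiredSecret.Data // overwrite only the owned fields
		return nil
	})
	if err != nil {
		return nil, err
	}
	return result.(*corev1.Secret), nil
}
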
func (r *SeaweedReconciler) addSpecToAnnotation(d *appsv1.Deployment) error {
b, err := json.Marshal(d.Spec.Template.Spec)
if err != nil {
return err
}
if d.Annotations == nil {
d.Annotations = map[string]string{}
}
d.Annotations[LastAppliedPodTemplate] = string(b)
return nil
}
func (r *SeaweedReconciler) CreateOrUpdateDeployment(deploy *appsv1.Deployment) (*appsv1.Deployment, error) {
if err := r.addSpecToAnnotation(deploy); err != nil {
return nil, err
}
result, err := r.CreateOrUpdate(deploy, func(existing, desired runtime.Object) error {
existingDep := existing.(*appsv1.Deployment)
desiredDep := desired.(*appsv1.Deployment)
existingDep.Spec.Replicas = desiredDep.Spec.Replicas
existingDep.Labels = desiredDep.Labels
if existingDep.Annotations == nil {
existingDep.Annotations = map[string]string{}
}
for k, v := range desiredDep.Annotations {
existingDep.Annotations[k] = v
}
// only override the default strategy if it is explicitly set in the desiredDep
if string(desiredDep.Spec.Strategy.Type) != "" {
existingDep.Spec.Strategy.Type = desiredDep.Spec.Strategy.Type
if existingDep.Spec.Strategy.RollingUpdate != nil {
existingDep.Spec.Strategy.RollingUpdate = desiredDep.Spec.Strategy.RollingUpdate
}
}
// the pod selector of a Deployment is immutable, so we don't mutate the pod labels
for k, v := range desiredDep.Spec.Template.Annotations {
existingDep.Spec.Template.Annotations[k] = v
}
// a Deployment's podSpec is hard to merge, so use an annotation to assist
if DeploymentPodSpecChanged(desiredDep, existingDep) {
// Record the last applied spec for future equality checks
b, err := json.Marshal(desiredDep.Spec.Template.Spec)
if err != nil {
return err
}
existingDep.Annotations[LastAppliedConfigAnnotation] = string(b)
existingDep.Spec.Template.Spec = desiredDep.Spec.Template.Spec
}
return nil
})
if err != nil {
return nil, err
}
return result.(*appsv1.Deployment), err
}
func (r *SeaweedReconciler) CreateOrUpdateService(svc *corev1.Service) (*corev1.Service, error) {
result, err := r.CreateOrUpdate(svc, func(existing, desired runtime.Object) error {
existingSvc := existing.(*corev1.Service)
desiredSvc := desired.(*corev1.Service)
if existingSvc.Annotations == nil {
existingSvc.Annotations = map[string]string{}
}
for k, v := range desiredSvc.Annotations {
existingSvc.Annotations[k] = v
}
existingSvc.Labels = desiredSvc.Labels
equal, err := ServiceEqual(desiredSvc, existingSvc)
if err != nil {
return err
}
if !equal {
// record the desired Spec in annotations for future equality checks
b, err := json.Marshal(desiredSvc.Spec)
if err != nil {
return err
}
existingSvc.Annotations[LastAppliedConfigAnnotation] = string(b)
clusterIp := existingSvc.Spec.ClusterIP
ports := existingSvc.Spec.Ports
serviceType := existingSvc.Spec.Type
existingSvc.Spec = desiredSvc.Spec
existingSvc.Spec.ClusterIP = clusterIp
// If both the existing and the desired service are of type NodePort or LoadBalancer, keep the nodePort unchanged.
if (serviceType == corev1.ServiceTypeNodePort || serviceType == corev1.ServiceTypeLoadBalancer) &&
(desiredSvc.Spec.Type == corev1.ServiceTypeNodePort || desiredSvc.Spec.Type == corev1.ServiceTypeLoadBalancer) {
for i, dport := range existingSvc.Spec.Ports {
for _, eport := range ports {
// Because the port name could be edited,
// we match the desired port with the existing port by port number in the nested loop
if dport.Port == eport.Port && dport.Protocol == eport.Protocol {
dport.NodePort = eport.NodePort
existingSvc.Spec.Ports[i] = dport
break
}
}
}
}
}
return nil
})
if err != nil {
return nil, err
}
return result.(*corev1.Service), nil
}
func (r *SeaweedReconciler) CreateOrUpdateIngress(ingress *extensionsv1beta1.Ingress) (*extensionsv1beta1.Ingress, error) {
result, err := r.CreateOrUpdate(ingress, func(existing, desired runtime.Object) error {
existingIngress := existing.(*extensionsv1beta1.Ingress)
desiredIngress := desired.(*extensionsv1beta1.Ingress)
if existingIngress.Annotations == nil {
existingIngress.Annotations = map[string]string{}
}
for k, v := range desiredIngress.Annotations {
existingIngress.Annotations[k] = v
}
existingIngress.Labels = desiredIngress.Labels
equal, err := IngressEqual(desiredIngress, existingIngress)
if err != nil {
return err
}
if !equal {
// record the desired Spec in annotations for future equality checks
b, err := json.Marshal(desiredIngress.Spec)
if err != nil {
return err
}
existingIngress.Annotations[LastAppliedConfigAnnotation] = string(b)
existingIngress.Spec = desiredIngress.Spec
}
return nil
})
if err != nil {
return nil, err
}
return result.(*extensionsv1beta1.Ingress), nil
}
func (r *SeaweedReconciler) CreateOrUpdateConfigMap(configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) {
result, err := r.CreateOrUpdate(configMap, func(existing, desired runtime.Object) error {
existingConfigMap := existing.(*corev1.ConfigMap)
desiredConfigMap := desired.(*corev1.ConfigMap)
if existingConfigMap.Annotations == nil {
existingConfigMap.Annotations = map[string]string{}
}
for k, v := range desiredConfigMap.Annotations {
existingConfigMap.Annotations[k] = v
}
existingConfigMap.Labels = desiredConfigMap.Labels
existingConfigMap.Data = desiredConfigMap.Data
return nil
})
if err != nil {
return nil, err
}
return result.(*corev1.ConfigMap), nil
}
// EmptyClone creates a clone of the resource with the same name and namespace (if namespace-scoped), with all other fields unset
func EmptyClone(obj runtime.Object) (runtime.Object, error) {
meta, ok := obj.(metav1.Object)
if !ok {
return nil, fmt.Errorf("Obj %v is not a metav1.Object, cannot call EmptyClone", obj)
}
gvk, err := InferObjectKind(obj)
if err != nil {
return nil, err
}
inst, err := scheme.Scheme.New(gvk)
if err != nil {
return nil, err
}
instMeta, ok := inst.(metav1.Object)
if !ok {
return nil, fmt.Errorf("New instatnce %v created from scheme is not a metav1.Object, EmptyClone failed", inst)
}
instMeta.SetName(meta.GetName())
instMeta.SetNamespace(meta.GetNamespace())
return inst, nil
}
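A hedged note on how EmptyClone is used; the Deployment values below are made up:

// Illustrative only: for &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "x", Namespace: "ns"}},
// EmptyClone returns a fresh, empty *appsv1.Deployment with only Name and Namespace set;
// CreateOrUpdate above uses that empty shell as the target of its client Get call.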
// InferObjectKind infers the object kind
func InferObjectKind(obj runtime.Object) (schema.GroupVersionKind, error) {
gvks, _, err := scheme.Scheme.ObjectKinds(obj)
if err != nil {
return schema.GroupVersionKind{}, err
}
if len(gvks) != 1 {
return schema.GroupVersionKind{}, fmt.Errorf("Object %v has ambiguous GVK", obj)
}
return gvks[0], nil
}
// GetDeploymentLastAppliedPodTemplate gets the last applied pod template from the Deployment's annotation
func GetDeploymentLastAppliedPodTemplate(dep *appsv1.Deployment) (*corev1.PodSpec, error) {
applied, ok := dep.Annotations[LastAppliedPodTemplate]
if !ok {
return nil, fmt.Errorf("deployment:[%s/%s] not found spec's apply config", dep.GetNamespace(), dep.GetName())
}
podSpec := &corev1.PodSpec{}
err := json.Unmarshal([]byte(applied), podSpec)
if err != nil {
return nil, err
}
return podSpec, nil
}
// DeploymentPodSpecChanged checks whether the new deployment differs from the old one's last-applied-config
func DeploymentPodSpecChanged(newDep *appsv1.Deployment, oldDep *appsv1.Deployment) bool {
lastAppliedPodTemplate, err := GetDeploymentLastAppliedPodTemplate(oldDep)
if err != nil {
klog.Warningf("error get last-applied-config of deployment %s/%s: %v", oldDep.Namespace, oldDep.Name, err)
return true
}
return !apiequality.Semantic.DeepEqual(newDep.Spec.Template.Spec, lastAppliedPodTemplate)
}
// ServiceEqual compares the new Service's spec with the old Service's last applied config
func ServiceEqual(newSvc, oldSvc *corev1.Service) (bool, error) {
oldSpec := corev1.ServiceSpec{}
if lastAppliedConfig, ok := oldSvc.Annotations[LastAppliedConfigAnnotation]; ok {
err := json.Unmarshal([]byte(lastAppliedConfig), &oldSpec)
if err != nil {
klog.Errorf("unmarshal ServiceSpec: [%s/%s]'s applied config failed,error: %v", oldSvc.GetNamespace(), oldSvc.GetName(), err)
return false, err
}
return apiequality.Semantic.DeepEqual(oldSpec, newSvc.Spec), nil
}
return false, nil
}
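
The round trip that ServiceEqual relies on may be easier to see in one place. A minimal sketch (hypothetical function, illustrative names): the desired Spec is snapshotted into LastAppliedConfigAnnotation when the Service is updated, and the next reconcile compares against that snapshot rather than against the live, defaulted Spec.

func exampleRecordThenCompare(desired, live *corev1.Service) (bool, error) {
	b, err := json.Marshal(desired.Spec)
	if err != nil {
		return false, err
	}
	if live.Annotations == nil {
		live.Annotations = map[string]string{}
	}
	live.Annotations[LastAppliedConfigAnnotation] = string(b) // written by CreateOrUpdateService
	return ServiceEqual(desired, live)                        // read on the next reconcile
}
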
// IngressEqual compares the new Ingress's spec with the old Ingress's last applied config
func IngressEqual(newIngress, oldIngress *extensionsv1beta1.Ingress) (bool, error) {
oldIngressSpec := extensionsv1beta1.IngressSpec{}
if lastAppliedConfig, ok := oldIngress.Annotations[LastAppliedConfigAnnotation]; ok {
err := json.Unmarshal([]byte(lastAppliedConfig), &oldIngressSpec)
if err != nil {
klog.Errorf("failed to unmarshal last applied IngressSpec of [%s/%s], error: %v", oldIngress.GetNamespace(), oldIngress.GetName(), err)
return false, err
}
return apiequality.Semantic.DeepEqual(oldIngressSpec, newIngress.Spec), nil
}
return false, nil
}


@ -3,80 +3,100 @@ package controllers
import (
"context"
"k8s.io/apimachinery/pkg/runtime"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
label "github.com/seaweedfs/seaweedfs-operator/controllers/label"
)
func (r *SeaweedReconciler) ensureVolumeServers(seaweedCR *seaweedv1.Seaweed) (done bool, result ctrl.Result, err error) {
_ = context.Background()
_ = r.Log.WithValues("seaweed", seaweedCR.Name)
if done, result, err = r.ensureVolumeServerPeerService(seaweedCR); done {
return
}
if done, result, err = r.ensureVolumeServerServices(seaweedCR); done {
return
}
if done, result, err = r.ensureVolumeServerStatefulSet(seaweedCR); done {
return done, result, err
return
}
if done, result, err = r.ensureVolumeServerService(seaweedCR); done {
return done, result, err
}
return false, ctrl.Result{}, nil
return
}
func (r *SeaweedReconciler) ensureVolumeServerStatefulSet(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
ctx := context.Background()
log := r.Log.WithValues("sw-volume-statefulset", seaweedCR.Name)
volumeServerStatefulSet := &appsv1.StatefulSet{}
err := r.Get(ctx, types.NamespacedName{Name: seaweedCR.Name + "-volume", Namespace: seaweedCR.Namespace}, volumeServerStatefulSet)
if err != nil && errors.IsNotFound(err) {
// Define a new deployment
dep := r.createVolumeServerStatefulSet(seaweedCR)
log.Info("Creating a new volume statefulset", "Namespace", dep.Namespace, "Name", dep.Name)
err = r.Create(ctx, dep)
if err != nil {
log.Error(err, "Failed to create new volume statefulset", "Namespace", dep.Namespace, "Name", dep.Name)
return true, ctrl.Result{}, err
}
// Deployment created successfully - return and requeue
return false, ctrl.Result{}, nil
} else if err != nil {
log.Error(err, "Failed to get volume server statefulset")
return true, ctrl.Result{}, err
volumeServerStatefulSet := r.createVolumeServerStatefulSet(seaweedCR)
if err := controllerutil.SetControllerReference(seaweedCR, volumeServerStatefulSet, r.Scheme); err != nil {
return ReconcileResult(err)
}
log.Info("Get volume stateful set " + volumeServerStatefulSet.Name)
return false, ctrl.Result{}, nil
_, err := r.CreateOrUpdate(volumeServerStatefulSet, func(existing, desired runtime.Object) error {
existingStatefulSet := existing.(*appsv1.StatefulSet)
desiredStatefulSet := desired.(*appsv1.StatefulSet)
existingStatefulSet.Spec.Replicas = desiredStatefulSet.Spec.Replicas
existingStatefulSet.Spec.Template.Spec = desiredStatefulSet.Spec.Template.Spec
return nil
})
log.Info("ensure volume stateful set " + volumeServerStatefulSet.Name)
return ReconcileResult(err)
}
func (r *SeaweedReconciler) ensureVolumeServerService(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
ctx := context.Background()
log := r.Log.WithValues("sw-volume-service", seaweedCR.Name)
func (r *SeaweedReconciler) ensureVolumeServerPeerService(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
volumeServerService := &corev1.Service{}
err := r.Get(ctx, types.NamespacedName{Name: seaweedCR.Name + "-volume", Namespace: seaweedCR.Namespace}, volumeServerService)
if err != nil && errors.IsNotFound(err) {
// Define a new deployment
dep := r.createVolumeServerService(seaweedCR)
log.Info("Creating a new volume service", "Namespace", dep.Namespace, "Name", dep.Name)
err = r.Create(ctx, dep)
if err != nil {
log.Error(err, "Failed to create new volume service", "Namespace", dep.Namespace, "Name", dep.Name)
return true, ctrl.Result{}, err
}
// Deployment created successfully - return and requeue
return false, ctrl.Result{}, nil
} else if err != nil {
log.Error(err, "Failed to get volume server service")
return true, ctrl.Result{}, err
log := r.Log.WithValues("sw-volume-peer-service", seaweedCR.Name)
volumeServerPeerService := r.createVolumeServerPeerService(seaweedCR)
if err := controllerutil.SetControllerReference(seaweedCR, volumeServerPeerService, r.Scheme); err != nil {
return ReconcileResult(err)
}
log.Info("Get volume service " + volumeServerService.Name)
return false, ctrl.Result{}, nil
_, err := r.CreateOrUpdateService(volumeServerPeerService)
log.Info("ensure volume peer service " + volumeServerPeerService.Name)
return ReconcileResult(err)
}
func (r *SeaweedReconciler) ensureVolumeServerServices(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
for i := 0; i < int(seaweedCR.Spec.Volume.Replicas); i++ {
done, result, err := r.ensureVolumeServerService(seaweedCR, i)
if done {
return done, result, err
}
}
return ReconcileResult(nil)
}
func (r *SeaweedReconciler) ensureVolumeServerService(seaweedCR *seaweedv1.Seaweed, i int) (bool, ctrl.Result, error) {
log := r.Log.WithValues("sw-volume-service", seaweedCR.Name, "index", i)
volumeServerService := r.createVolumeServerService(seaweedCR, i)
if err := controllerutil.SetControllerReference(seaweedCR, volumeServerService, r.Scheme); err != nil {
return ReconcileResult(err)
}
_, err := r.CreateOrUpdateService(volumeServerService)
log.Info("ensure volume service "+volumeServerService.Name, "index", i)
return ReconcileResult(err)
}
func labelsForVolumeServer(name string) map[string]string {
return map[string]string{"app": "seaweedfs", "role": "volume", "name": name}
return map[string]string{
label.ManagedByLabelKey: "seaweedfs-operator",
label.NameLabelKey: "seaweedfs",
label.ComponentLabelKey: "volume",
label.InstanceLabelKey: name,
}
}


@ -1,6 +1,9 @@
package controllers
import (
"fmt"
"github.com/seaweedfs/seaweedfs-operator/controllers/label"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
@ -8,12 +11,12 @@ import (
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
func (r *SeaweedReconciler) createVolumeServerService(m *seaweedv1.Seaweed) *corev1.Service {
func (r *SeaweedReconciler) createVolumeServerPeerService(m *seaweedv1.Seaweed) *corev1.Service {
labels := labelsForVolumeServer(m.Name)
dep := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: m.Name + "-volume",
Name: m.Name + "-volume-peer",
Namespace: m.Namespace,
Labels: labels,
Annotations: map[string]string{
@ -25,22 +28,16 @@ func (r *SeaweedReconciler) createVolumeServerService(m *seaweedv1.Seaweed) *cor
PublishNotReadyAddresses: true,
Ports: []corev1.ServicePort{
{
Name: "swfs-volume",
Protocol: corev1.Protocol("TCP"),
Port: 8444,
TargetPort: intstr.IntOrString{
Type: intstr.Int,
IntVal: 8444,
},
Name: "volume-http",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.VolumeHTTPPort,
TargetPort: intstr.FromInt(seaweedv1.VolumeHTTPPort),
},
{
Name: "swfs-volume-grpc",
Protocol: corev1.Protocol("TCP"),
Port: 18444,
TargetPort: intstr.IntOrString{
Type: intstr.Int,
IntVal: 18444,
},
Name: "volume-grpc",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.VolumeGRPCPort,
TargetPort: intstr.FromInt(seaweedv1.VolumeGRPCPort),
},
},
Selector: labels,
@ -48,3 +45,56 @@ func (r *SeaweedReconciler) createVolumeServerService(m *seaweedv1.Seaweed) *cor
}
return dep
}
func (r *SeaweedReconciler) createVolumeServerService(m *seaweedv1.Seaweed, i int) *corev1.Service {
labels := labelsForVolumeServer(m.Name)
serviceName := fmt.Sprintf("%s-volume-%d", m.Name, i)
labels[label.PodName] = serviceName
dep := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
Namespace: m.Namespace,
Labels: labels,
Annotations: map[string]string{
"service.alpha.kubernetes.io/tolerate-unready-endpoints": "true",
},
},
Spec: corev1.ServiceSpec{
PublishNotReadyAddresses: true,
Ports: []corev1.ServicePort{
{
Name: "volume-http",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.VolumeHTTPPort,
TargetPort: intstr.FromInt(seaweedv1.VolumeHTTPPort),
},
{
Name: "volume-grpc",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.VolumeGRPCPort,
TargetPort: intstr.FromInt(seaweedv1.VolumeGRPCPort),
},
},
Selector: labels,
},
}
if m.Spec.Volume.Service != nil {
svcSpec := m.Spec.Volume.Service
dep.Annotations = copyAnnotations(svcSpec.Annotations)
if svcSpec.Type != "" {
dep.Spec.Type = svcSpec.Type
}
if svcSpec.ClusterIP != nil {
dep.Spec.ClusterIP = *svcSpec.ClusterIP
}
if svcSpec.LoadBalancerIP != nil {
dep.Spec.LoadBalancerIP = *svcSpec.LoadBalancerIP
}
}
return dep
}


@ -2,27 +2,139 @@ package controllers
import (
"fmt"
"strings"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
func buildVolumeServerStartupScript(m *seaweedv1.Seaweed, dirs []string) string {
commands := []string{"weed", "-logtostderr=true", "volume"}
commands = append(commands, fmt.Sprintf("-port=%d", seaweedv1.VolumeHTTPPort))
commands = append(commands, "-max=0")
commands = append(commands, fmt.Sprintf("-ip=$(POD_NAME).%s-volume-peer.%s", m.Name, m.Namespace))
if m.Spec.HostSuffix != nil && *m.Spec.HostSuffix != "" {
commands = append(commands, fmt.Sprintf("-publicUrl=$(POD_NAME).%s", *m.Spec.HostSuffix))
}
commands = append(commands, fmt.Sprintf("-mserver=%s", getMasterPeersString(m)))
commands = append(commands, fmt.Sprintf("-dir=%s", strings.Join(dirs, ",")))
return strings.Join(commands, " ")
}
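
As a rough illustration (the CR name, namespace, replica count and data-dir count are all assumed for the example): for a Seaweed CR named seaweed1 in namespace default with three master replicas, one data directory and no HostSuffix, the script built above comes out approximately as the following single line (wrapped here for readability):

// weed -logtostderr=true volume -port=<seaweedv1.VolumeHTTPPort> -max=0
//   -ip=$(POD_NAME).seaweed1-volume-peer.default
//   -mserver=seaweed1-master-0.seaweed1-master-peer.default:9333,seaweed1-master-1.seaweed1-master-peer.default:9333,seaweed1-master-2.seaweed1-master-peer.default:9333
//   -dir=/data0
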
func (r *SeaweedReconciler) createVolumeServerStatefulSet(m *seaweedv1.Seaweed) *appsv1.StatefulSet {
labels := labelsForVolumeServer(m.Name)
replicas := int32(m.Spec.VolumeServerCount)
replicas := int32(m.Spec.Volume.Replicas)
rollingUpdatePartition := int32(0)
enableServiceLinks := false
volumeCount := int(m.Spec.VolumeServerDiskCount)
volumeRequests := corev1.ResourceList{
corev1.ResourceStorage: m.Spec.Volume.Requests[corev1.ResourceStorage],
}
// connect all the disks
var volumeMounts []corev1.VolumeMount
var volumes []corev1.Volume
var persistentVolumeClaims []corev1.PersistentVolumeClaim
var dirs []string
for i := 0; i < volumeCount; i++ {
volumeMounts = append(volumeMounts, corev1.VolumeMount{
Name: fmt.Sprintf("mount%d", i),
ReadOnly: false,
MountPath: fmt.Sprintf("/data%d/", i),
})
volumes = append(volumes, corev1.Volume{
Name: fmt.Sprintf("mount%d", i),
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: fmt.Sprintf("mount%d", i),
ReadOnly: false,
},
},
})
persistentVolumeClaims = append(persistentVolumeClaims, corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("mount%d", i),
},
Spec: corev1.PersistentVolumeClaimSpec{
StorageClassName: m.Spec.Volume.StorageClassName,
AccessModes: []corev1.PersistentVolumeAccessMode{
corev1.ReadWriteOnce,
},
Resources: corev1.ResourceRequirements{
Requests: volumeRequests,
},
},
})
dirs = append(dirs, fmt.Sprintf("/data%d", i))
}
volumePodSpec := m.BaseVolumeSpec().BuildPodSpec()
volumePodSpec.EnableServiceLinks = &enableServiceLinks
volumePodSpec.Containers = []corev1.Container{{
Name: "volume",
Image: m.Spec.Image,
ImagePullPolicy: m.BaseVolumeSpec().ImagePullPolicy(),
Env: append(m.BaseVolumeSpec().Env(), kubernetesEnvVars...),
Command: []string{
"/bin/sh",
"-ec",
buildVolumeServerStartupScript(m, dirs),
},
Ports: []corev1.ContainerPort{
{
ContainerPort: seaweedv1.VolumeHTTPPort,
Name: "volume-http",
},
{
ContainerPort: seaweedv1.VolumeGRPCPort,
Name: "volume-grpc",
},
},
ReadinessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/status",
Port: intstr.FromInt(seaweedv1.VolumeHTTPPort),
Scheme: corev1.URISchemeHTTP,
},
},
InitialDelaySeconds: 15,
TimeoutSeconds: 5,
PeriodSeconds: 90,
SuccessThreshold: 1,
FailureThreshold: 100,
},
LivenessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/status",
Port: intstr.FromInt(seaweedv1.VolumeHTTPPort),
Scheme: corev1.URISchemeHTTP,
},
},
InitialDelaySeconds: 20,
TimeoutSeconds: 5,
PeriodSeconds: 90,
SuccessThreshold: 1,
FailureThreshold: 6,
},
VolumeMounts: volumeMounts,
}}
volumePodSpec.Volumes = volumes
dep := &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: m.Name + "-volume",
Namespace: m.Namespace,
},
Spec: appsv1.StatefulSetSpec{
ServiceName: m.Name + "-volume",
ServiceName: m.Name + "-volume-peer",
PodManagementPolicy: appsv1.ParallelPodManagement,
Replicas: &replicas,
UpdateStrategy: appsv1.StatefulSetUpdateStrategy{
@ -38,96 +150,9 @@ func (r *SeaweedReconciler) createVolumeServerStatefulSet(m *seaweedv1.Seaweed)
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: corev1.PodSpec{
EnableServiceLinks: &enableServiceLinks,
Containers: []corev1.Container{{
Name: "seaweedfs",
Image: "chrislusf/seaweedfs:latest",
ImagePullPolicy: corev1.PullIfNotPresent,
Env: []corev1.EnvVar{
{
Name: "POD_IP",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "status.podIP",
},
},
},
{
Name: "POD_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.name",
},
},
},
{
Name: "NAMESPACE",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.namespace",
},
},
},
},
Command: []string{
"/bin/sh",
"-ec",
fmt.Sprintf("weed volume -port=8444 -max=0 %s %s",
fmt.Sprintf("-ip=$(POD_NAME).%s-volume", m.Name),
fmt.Sprintf("-mserver=%s-master-0.%s-master:9333,%s-master-1.%s-master:9333,%s-master-2.%s-master:9333",
m.Name, m.Name, m.Name, m.Name, m.Name, m.Name),
),
},
Ports: []corev1.ContainerPort{
{
ContainerPort: 8444,
Name: "swfs-volume",
},
{
ContainerPort: 18444,
},
},
/*
ReadinessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/cluster/status",
Port: intstr.IntOrString{
Type: 0,
IntVal: 9333,
},
Scheme: "http",
},
},
InitialDelaySeconds: 5,
TimeoutSeconds: 0,
PeriodSeconds: 15,
SuccessThreshold: 2,
FailureThreshold: 100,
},
LivenessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/cluster/status",
Port: intstr.IntOrString{
Type: 0,
IntVal: 9333,
},
Scheme: "http",
},
},
InitialDelaySeconds: 20,
TimeoutSeconds: 0,
PeriodSeconds: 10,
SuccessThreshold: 1,
FailureThreshold: 6,
},
*/
}},
},
Spec: volumePodSpec,
},
VolumeClaimTemplates: persistentVolumeClaims,
},
}
return dep

controllers/helper.go Normal file

@ -0,0 +1,73 @@
package controllers
import (
"fmt"
"strings"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
corev1 "k8s.io/api/core/v1"
ctrl "sigs.k8s.io/controller-runtime"
)
const (
masterPeerAddressPattern = "%s-master-%d.%s-master-peer.%s:9333"
)
var (
kubernetesEnvVars = []corev1.EnvVar{
{
Name: "POD_IP",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "status.podIP",
},
},
},
{
Name: "POD_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.name",
},
},
},
{
Name: "NAMESPACE",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.namespace",
},
},
},
}
)
func ReconcileResult(err error) (bool, ctrl.Result, error) {
if err != nil {
return true, ctrl.Result{}, err
}
return false, ctrl.Result{}, nil
}
func getMasterAddresses(namespace string, name string, replicas int32) []string {
peersAddresses := make([]string, 0, replicas)
for i := int32(0); i < replicas; i++ {
peersAddresses = append(peersAddresses, fmt.Sprintf(masterPeerAddressPattern, name, i, name, namespace))
}
return peersAddresses
}
func getMasterPeersString(m *seaweedv1.Seaweed) string {
return strings.Join(getMasterAddresses(m.Namespace, m.Name, m.Spec.Master.Replicas), ",")
}
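For illustration (namespace and name are made up):

// getMasterAddresses("default", "seaweed1", 2) returns
//   seaweed1-master-0.seaweed1-master-peer.default:9333
//   seaweed1-master-1.seaweed1-master-peer.default:9333
// and getMasterPeersString joins them with commas, e.g. for the volume server's -mserver flag.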
func copyAnnotations(src map[string]string) map[string]string {
if src == nil {
return nil
}
dst := map[string]string{}
for k, v := range src {
dst[k] = v
}
return dst
}


@ -0,0 +1,22 @@
package label
const (
// The following labels are recommended by kubernetes https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/
// ManagedByLabelKey is Kubernetes recommended label key, it represents the tool being used to manage the operation of an application
// For resources managed by SeaweedFS Operator, its value is always seaweedfs-operator
ManagedByLabelKey string = "app.kubernetes.io/managed-by"
// ComponentLabelKey is Kubernetes recommended label key, it represents the component within the architecture
ComponentLabelKey string = "app.kubernetes.io/component"
// NameLabelKey is Kubernetes recommended label key, it represents the name of the application
NameLabelKey string = "app.kubernetes.io/name"
// InstanceLabelKey is Kubernetes recommended label key, it represents a unique name identifying the instance of an application
// It is usually set by Helm when installing a release; here the operator sets it to the Seaweed CR name
InstanceLabelKey string = "app.kubernetes.io/instance"
// VersionLabelKey is Kubernetes recommended label key, it represents the version of the app
VersionLabelKey string = "app.kubernetes.io/version"
// PodName is the label used to select a pod by name
// https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-selector
PodName string = "statefulset.kubernetes.io/pod-name"
)
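For illustration (the instance name is assumed), the volume-server labels built earlier resolve to the following with these keys:

// labelsForVolumeServer("seaweed1") =>
//   app.kubernetes.io/managed-by: seaweedfs-operator
//   app.kubernetes.io/name:       seaweedfs
//   app.kubernetes.io/component:  volume
//   app.kubernetes.io/instance:   seaweed1
// and each per-pod volume Service additionally selects on
//   statefulset.kubernetes.io/pod-name: seaweed1-volume-<ordinal>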


@ -38,7 +38,14 @@ type SeaweedReconciler struct {
// +kubebuilder:rbac:groups=seaweed.seaweedfs.com,resources=seaweeds,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=seaweed.seaweedfs.com,resources=seaweeds/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=extensions,resources=ingresses,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;
// Reconcile implements the reconciliation logic
func (r *SeaweedReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
ctx := context.Background()
log := r.Log.WithValues("seaweed", req.NamespacedName)
@ -50,17 +57,6 @@ func (r *SeaweedReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
return result, err
}
// temporary
if seaweedCR.Spec.VolumeServerCount == 0 {
seaweedCR.Spec.VolumeServerCount = 1
}
if seaweedCR.Spec.FilerCount == 0 {
seaweedCR.Spec.FilerCount = 1
}
if seaweedCR.Spec.S3Count == 0 {
seaweedCR.Spec.S3Count = 1
}
if done, result, err = r.ensureMaster(seaweedCR); done {
return result, err
}
@ -73,11 +69,17 @@ func (r *SeaweedReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
return result, err
}
if done, result, err = r.ensureS3Servers(seaweedCR); done {
if done, result, err = r.ensureSeaweedIngress(seaweedCR); done {
return result, err
}
return ctrl.Result{}, nil
if false {
if done, result, err = r.maintenance(seaweedCR); done {
return result, err
}
}
return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
}
func (r *SeaweedReconciler) findSeaweedCustomResourceInstance(ctx context.Context, log logr.Logger, req ctrl.Request) (*seaweedv1.Seaweed, bool, ctrl.Result, error) {


@ -0,0 +1,94 @@
package controllers
import (
"context"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
var (
TrueValue = true
FalseValue = false
)
var _ = Describe("Seaweed Controller", func() {
Context("Basic Functionality", func() {
It("Should create StatefulSets", func() {
By("By creating a new Seaweed", func() {
const (
namespace = "default"
name = "test-seaweed"
timeout = time.Second * 30
interval = time.Millisecond * 250
)
ctx := context.Background()
seaweed := &seaweedv1.Seaweed{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
Spec: seaweedv1.SeaweedSpec{
Image: "chrislusf/seaweedfs:2.96",
VolumeServerDiskCount: 1,
Master: &seaweedv1.MasterSpec{
Replicas: 3,
ConcurrentStart: &TrueValue,
},
Volume: &seaweedv1.VolumeSpec{
Replicas: 1,
ResourceRequirements: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceStorage: resource.MustParse("1Gi"),
},
},
},
Filer: &seaweedv1.FilerSpec{
Replicas: 2,
},
},
}
Expect(k8sClient.Create(ctx, seaweed)).Should(Succeed())
masterKey := types.NamespacedName{Name: name + "-master", Namespace: namespace}
volumeKey := types.NamespacedName{Name: name + "-volume", Namespace: namespace}
filerKey := types.NamespacedName{Name: name + "-filer", Namespace: namespace}
masterSts := &appsv1.StatefulSet{}
volumeSts := &appsv1.StatefulSet{}
filerSts := &appsv1.StatefulSet{}
Eventually(func() bool {
err := k8sClient.Get(ctx, masterKey, masterSts)
return err == nil
}, timeout, interval).Should(BeTrue())
Expect(masterSts.Spec.Replicas).ShouldNot(BeNil())
Expect(*masterSts.Spec.Replicas).Should(Equal(seaweed.Spec.Master.Replicas))
Eventually(func() bool {
err := k8sClient.Get(ctx, volumeKey, volumeSts)
return err == nil
}, timeout, interval).Should(BeTrue())
Expect(volumeSts.Spec.Replicas).ShouldNot(BeNil())
Expect(*volumeSts.Spec.Replicas).Should(Equal(seaweed.Spec.Volume.Replicas))
Eventually(func() bool {
err := k8sClient.Get(ctx, filerKey, filerSts)
return err == nil
}, timeout, interval).Should(BeTrue())
Expect(filerSts.Spec.Replicas).ShouldNot(BeNil())
Expect(*filerSts.Spec.Replicas).Should(Equal(seaweed.Spec.Filer.Replicas))
})
})
})
})


@ -0,0 +1,37 @@
package controllers
import (
"io/ioutil"
"os"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
"github.com/seaweedfs/seaweedfs-operator/controllers/swadmin"
ctrl "sigs.k8s.io/controller-runtime"
)
func (r *SeaweedReconciler) maintenance(m *seaweedv1.Seaweed) (done bool, result ctrl.Result, err error) {
masters := getMasterPeersString(m)
r.Log.V(0).Info("wait to connect to masters", "masters", masters)
// this step blocks since the operator can not access the masters when running from outside of the k8s cluster
sa := swadmin.NewSeaweedAdmin(masters, ioutil.Discard)
// For now this only demonstrates the admin commands;
// the master already runs some maintenance commands by default.
r.Log.V(0).Info("volume.list")
sa.Output = os.Stdout
if err := sa.ProcessCommand("volume.list"); err != nil {
r.Log.V(0).Info("volume.list", "error", err)
}
sa.ProcessCommand("lock")
if err := sa.ProcessCommand("volume.balance -force"); err != nil {
r.Log.V(0).Info("volume.balance", "error", err)
}
sa.ProcessCommand("unlock")
return ReconcileResult(nil)
}


@ -24,6 +24,7 @@ import (
. "github.com/onsi/gomega"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
@ -68,6 +69,23 @@ var _ = BeforeSuite(func(done Done) {
// +kubebuilder:scaffold:scheme
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{
Scheme: scheme.Scheme,
})
Expect(err).ToNot(HaveOccurred())
err = (&SeaweedReconciler{
Client: k8sManager.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("Seaweed"),
Scheme: k8sManager.GetScheme(),
}).SetupWithManager(k8sManager)
Expect(err).ToNot(HaveOccurred())
go func() {
err = k8sManager.Start(ctrl.SetupSignalHandler())
Expect(err).ToNot(HaveOccurred())
}()
Expect(err).ToNot(HaveOccurred())
Expect(k8sClient).ToNot(BeNil())


@ -0,0 +1,67 @@
package swadmin
import (
"fmt"
"io"
"regexp"
"strings"
"github.com/chrislusf/seaweedfs/weed/shell"
"google.golang.org/grpc"
)
type SeaweedAdmin struct {
commandReg *regexp.Regexp
commandEnv *shell.CommandEnv
Output io.Writer
}
func NewSeaweedAdmin(masters string, output io.Writer) *SeaweedAdmin {
var shellOptions shell.ShellOptions
shellOptions.GrpcDialOption = grpc.WithInsecure()
shellOptions.Masters = &masters
commandEnv := shell.NewCommandEnv(shellOptions)
reg, _ := regexp.Compile(`'.*?'|".*?"|\S+`)
go commandEnv.MasterClient.LoopConnectToMaster()
return &SeaweedAdmin{
commandEnv: commandEnv,
commandReg: reg,
Output: output,
}
}
// ProcessCommands processes cmds, which can be semicolon-separated commands
func (sa *SeaweedAdmin) ProcessCommands(cmds string) error {
for _, c := range strings.Split(cmds, ";") {
if err := sa.ProcessCommand(c); err != nil {
return err
}
}
return nil
}
func (sa *SeaweedAdmin) ProcessCommand(cmd string) error {
sa.commandEnv.MasterClient.WaitUntilConnected()
cmds := sa.commandReg.FindAllString(cmd, -1)
if len(cmds) == 0 {
return nil
}
args := make([]string, len(cmds[1:]))
for i := range args {
args[i] = strings.Trim(string(cmds[1+i]), "\"'")
}
for _, c := range shell.Commands {
if c.Name() == cmds[0] || c.Name() == "fs."+cmds[0] {
return c.Do(args, sa.commandEnv, sa.Output)
}
}
return fmt.Errorf("unknown command: %v", cmd)
}
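
A minimal usage sketch of the wrapper above (hypothetical caller; the master address is made up, and the call blocks until a master is reachable). The commands shown are the same ones maintenance.go issues:

func exampleAdminUsage(out io.Writer) error {
	sa := NewSeaweedAdmin("seaweed1-master-0.seaweed1-master-peer.default:9333", out)
	// semicolon-separated commands are split by ProcessCommands
	return sa.ProcessCommands("lock; volume.balance -force; unlock")
}
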

go.mod

@ -1,13 +1,101 @@
module github.com/seaweedfs/seaweedfs-operator
go 1.13
go 1.17
require (
github.com/chrislusf/seaweedfs v0.0.0-20211103083639-3c245c69d369
github.com/go-logr/logr v0.1.0
github.com/onsi/ginkgo v1.11.0
github.com/onsi/gomega v1.8.1
github.com/onsi/ginkgo v1.14.2
github.com/onsi/gomega v1.10.4
google.golang.org/grpc v1.40.0
k8s.io/api v0.18.2
k8s.io/apimachinery v0.18.2
k8s.io/client-go v0.18.2
k8s.io/klog v1.0.0
sigs.k8s.io/controller-runtime v0.6.0
)
require (
cloud.google.com/go v0.94.1 // indirect
github.com/aws/aws-sdk-go v1.35.3 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/disintegration/imaging v1.6.2 // indirect
github.com/evanphx/json-patch v4.5.0+incompatible // indirect
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/go-errors/errors v1.1.1 // indirect
github.com/go-logr/zapr v0.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt v3.2.1+incompatible // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/btree v1.0.0 // indirect
github.com/google/go-cmp v0.5.6 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/googleapis/gax-go/v2 v2.1.0 // indirect
github.com/googleapis/gnostic v0.3.1 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 // indirect
github.com/hashicorp/golang-lru v0.5.1 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/imdario/mergo v0.3.6 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/json-iterator/go v1.1.11 // indirect
github.com/karlseguin/ccache/v2 v2.0.7 // indirect
github.com/klauspost/cpuid v1.2.1 // indirect
github.com/klauspost/reedsolomon v1.9.2 // indirect
github.com/magiconair/properties v1.8.1 // indirect
github.com/mattn/go-runewidth v0.0.7 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/mitchellh/mapstructure v1.1.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/nxadm/tail v1.4.4 // indirect
github.com/pelletier/go-toml v1.7.0 // indirect
github.com/peterh/liner v1.1.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.11.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.26.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
github.com/seaweedfs/goexif v1.0.2 // indirect
github.com/spf13/afero v1.6.0 // indirect
github.com/spf13/cast v1.3.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.4.0 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 // indirect
github.com/tylertreat/BoomFilters v0.0.0-20210315201527-1a82519a3e43 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/viant/ptrie v0.3.0 // indirect
github.com/viant/toolbox v0.33.2 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.7.0 // indirect
go.uber.org/zap v1.17.0 // indirect
golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f // indirect
golang.org/x/image v0.0.0-20200119044424-58c23975cae1 // indirect
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365 // indirect
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect
golang.org/x/text v0.3.6 // indirect
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
gomodules.xyz/jsonpatch/v2 v2.0.1 // indirect
google.golang.org/api v0.57.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/fsnotify.v1 v1.4.7 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/apiextensions-apiserver v0.18.2 // indirect
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c // indirect
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 // indirect
sigs.k8s.io/structured-merge-diff/v3 v3.0.0 // indirect
sigs.k8s.io/yaml v1.2.0 // indirect
)

go.sum

File diff suppressed because it is too large.

hack/verify-codegen.sh Executable file

@ -0,0 +1,32 @@
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
ROOT=$(unset CDPATH && cd $(dirname "${BASH_SOURCE[0]}")/.. && pwd)
DIFFROOT="${ROOT}/api"
TMP_DIFFROOT="${ROOT}/_tmp/api"
_tmp="${ROOT}/_tmp"
cleanup() {
rm -rf "${_tmp}"
}
trap "cleanup" EXIT SIGINT
cleanup
mkdir -p "${TMP_DIFFROOT}"
cp -a "${DIFFROOT}"/* "${TMP_DIFFROOT}"
make generate
echo "diffing ${DIFFROOT} against freshly generated codegen"
ret=0
diff -Naupr "${DIFFROOT}" "${TMP_DIFFROOT}" || ret=$?
cp -a "${TMP_DIFFROOT}"/* "${DIFFROOT}"
if [[ $ret -eq 0 ]]; then
echo "${DIFFROOT} up to date."
else
echo "${DIFFROOT} is out of date. Please run make generate"
exit 1
fi

hack/verify-manifests.sh Executable file

@ -0,0 +1,32 @@
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
ROOT=$(unset CDPATH && cd $(dirname "${BASH_SOURCE[0]}")/.. && pwd)
DIFFROOT="${ROOT}/config"
TMP_DIFFROOT="${ROOT}/_tmp/config"
_tmp="${ROOT}/_tmp"
cleanup() {
rm -rf "${_tmp}"
}
trap "cleanup" EXIT SIGINT
cleanup
mkdir -p "${TMP_DIFFROOT}"
cp -a "${DIFFROOT}"/* "${TMP_DIFFROOT}"
make manifests
echo "diffing ${DIFFROOT} against freshly generated manifests"
ret=0
diff -Naupr "${DIFFROOT}" "${TMP_DIFFROOT}" || ret=$?
cp -a "${TMP_DIFFROOT}"/* "${DIFFROOT}"
if [[ $ret -eq 0 ]]; then
echo "${DIFFROOT} up to date."
else
echo "${DIFFROOT} is out of date. Please run make manifests"
exit 1
fi


@ -75,6 +75,13 @@ func main() {
setupLog.Error(err, "unable to create controller", "controller", "Seaweed")
os.Exit(1)
}
if os.Getenv("ENABLE_WEBHOOKS") != "false" {
if err = (&seaweedv1.Seaweed{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "Seaweed")
os.Exit(1)
}
}
// +kubebuilder:scaffold:builder
setupLog.Info("starting manager")


@ -1,10 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="WEB_MODULE" version="4">
<component name="CheckStyle-IDEA-Module">
<option name="configuration">
<map />
</option>
</component>
<component name="Go" enabled="true" />
<component name="NewModuleRootManager" inherit-compiler-output="true">
<exclude-output />