Commit 7eacbb7

feat(ec_join): update handler to return tcp connections required (#5004)
* feat(ec_join): add method to return all ready node ip addresses
* feat(ec_join): update handler to return node ips
* chore(test): create a struct and interface to allow kube client mocks in handlers
* chore: use newly created struct
* chore: tests for join handler
* chore: moaaar tests
* chore: tests for the worker and controller node IPs
* chore: refactor endpoint to return full endpoint list vs node ips
1 parent e87a657 commit 7eacbb7

10 files changed, +689 -16 lines changed

pkg/apiserver/server.go

Lines changed: 1 addition & 1 deletion
```diff
@@ -157,7 +157,7 @@ func Start(params *APIServerParams) {
 	loggingRouter := r.NewRoute().Subrouter()
 	loggingRouter.Use(handlers.LoggingMiddleware)
 
-	handler := &handlers.Handler{}
+	handler := handlers.NewHandler()
 
 	/**********************************************************************
 	* Unauthenticated routes
```
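The constructor itself is defined in one of the handler files not shown in this section; per the commit message it exists so a kube client interface can be mocked in handler tests. A minimal sketch of that pattern follows, with hypothetical names only (the real struct, interface, and field names are not visible in this diff):

```go
// Hypothetical sketch: the actual types live in pkg/handlers and may differ;
// names here are illustrative, not taken from this commit.
package handlers

import (
	"context"

	kbclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// KubeClientBuilder abstracts how a handler obtains a controller-runtime
// client so unit tests can substitute a fake client instead of a real cluster.
type KubeClientBuilder interface {
	GetKubeClient(ctx context.Context) (kbclient.Client, error)
}

// Handler carries the builder; production code wires in the default one
// via NewHandler(), matching the handlers.NewHandler() call in server.go above.
type Handler struct {
	KubeClientBuilder KubeClientBuilder
}

func NewHandler() *Handler {
	return &Handler{KubeClientBuilder: defaultKubeClientBuilder{}}
}

// defaultKubeClientBuilder stands in for whatever builds the real client from
// the cluster config; the body is stubbed here because it is not part of this diff.
type defaultKubeClientBuilder struct{}

func (defaultKubeClientBuilder) GetKubeClient(ctx context.Context) (kbclient.Client, error) {
	return nil, nil
}
```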

pkg/embeddedcluster/node_join.go

Lines changed: 82 additions & 6 deletions
```diff
@@ -77,6 +77,75 @@ func GenerateAddNodeToken(ctx context.Context, client kbclient.Client, nodeRole
 	return newToken, nil
 }
 
+// GetEndpointsToCheck returns the list of endpoints that should be checked by a node joining the cluster
+// based on the array of roles the node will have
+func GetEndpointsToCheck(ctx context.Context, client kbclient.Client, roles []string) ([]string, error) {
+	controllerRoleName, err := ControllerRoleName(ctx, client)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get controller role name: %w", err)
+	}
+
+	isController := false
+	for _, role := range roles {
+		if role == controllerRoleName {
+			isController = true
+			break
+		}
+	}
+	controllerAddr, workerAddr, err := getAllNodeIPAddresses(ctx, client)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get all node IP addresses: %w", err)
+	}
+
+	endpoints := []string{}
+	for _, addr := range controllerAddr {
+		// any joining node should be able to reach the kube-api port and k0s-api port on all the controllers
+		endpoints = append(endpoints, fmt.Sprintf("%s:6443", addr), fmt.Sprintf("%s:9443", addr))
+		if isController {
+			// controllers should be able to reach the etcd and kubelet ports on the controllers
+			endpoints = append(endpoints, fmt.Sprintf("%s:2380", addr), fmt.Sprintf("%s:10250", addr))
+		}
+	}
+	if isController {
+		for _, addr := range workerAddr {
+			// controllers should be able to reach the kubelet port on the workers
+			endpoints = append(endpoints, fmt.Sprintf("%s:10250", addr))
+		}
+	}
+	return endpoints, nil
+}
+
+// getAllNodeIPAddresses returns the internal IP addresses of all the ready nodes in the cluster grouped by
+// controller and worker nodes respectively
+func getAllNodeIPAddresses(ctx context.Context, client kbclient.Client) ([]string, []string, error) {
+	var nodes corev1.NodeList
+	if err := client.List(ctx, &nodes); err != nil {
+		return nil, nil, fmt.Errorf("failed to list nodes: %w", err)
+	}
+
+	controllerAddr := []string{}
+	workerAddr := []string{}
+	for _, node := range nodes.Items {
+		// Only consider nodes that are ready
+		if !isReady(node) {
+			continue
+		}
+
+		// Filter nodes by control-plane and worker roles
+		if cp, ok := node.Labels["node-role.kubernetes.io/control-plane"]; ok && cp == "true" {
+			if addr := findInternalIPAddress(node.Status.Addresses); addr != nil {
+				controllerAddr = append(controllerAddr, addr.Address)
+			}
+		} else {
+			if addr := findInternalIPAddress(node.Status.Addresses); addr != nil {
+				workerAddr = append(workerAddr, addr.Address)
+			}
+		}
+	}
+
+	return controllerAddr, workerAddr, nil
+}
+
 func makeK0sToken(ctx context.Context, client kbclient.Client, nodeRole string) (string, error) {
 	rawToken, err := k8sutil.GenerateK0sBootstrapToken(client, time.Hour, nodeRole)
 	if err != nil {
@@ -89,7 +158,7 @@ func makeK0sToken(ctx context.Context, client kbclient.Client, nodeRole string)
 	}
 	cert = base64.StdEncoding.EncodeToString([]byte(cert))
 
-	firstPrimary, err := firstPrimaryIpAddress(ctx, client)
+	firstPrimary, err := firstPrimaryIPAddress(ctx, client)
 	if err != nil {
 		return "", fmt.Errorf("failed to get first primary ip address: %w", err)
 	}
@@ -111,7 +180,7 @@ func makeK0sToken(ctx context.Context, client kbclient.Client, nodeRole string)
 	return b64Token, nil
 }
 
-func firstPrimaryIpAddress(ctx context.Context, client kbclient.Client) (string, error) {
+func firstPrimaryIPAddress(ctx context.Context, client kbclient.Client) (string, error) {
 	var nodes corev1.NodeList
 	if err := client.List(ctx, &nodes); err != nil {
 		return "", fmt.Errorf("failed to list nodes: %w", err)
@@ -122,16 +191,23 @@ func firstPrimaryIpAddress(ctx context.Context, client kbclient.Client) (string,
 			continue
 		}
 
-		for _, address := range node.Status.Addresses {
-			if address.Type == "InternalIP" {
-				return address.Address, nil
-			}
+		if addr := findInternalIPAddress(node.Status.Addresses); addr != nil {
+			return addr.Address, nil
 		}
 	}
 
 	return "", fmt.Errorf("failed to find controller node")
 }
 
+func findInternalIPAddress(addresses []corev1.NodeAddress) *corev1.NodeAddress {
+	for _, address := range addresses {
+		if address.Type == "InternalIP" {
+			return &address
+		}
+	}
+	return nil
+}
+
 // GenerateAddNodeCommand returns the command a user should run to add a node with the provided token
 // the command will be of the form 'embeddedcluster node join ip:port UUID'
 func GenerateAddNodeCommand(ctx context.Context, kbClient kbclient.Client, token string, isAirgap bool) (string, error) {
```
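The port rules encoded in GetEndpointsToCheck can be restated compactly. The sketch below is an illustrative summary of the logic and in-line comments in the diff above (6443 kube-api, 9443 k0s-api, 2380 etcd, 10250 kubelet), not additional code from the commit:

```go
// Illustrative restatement of the rules in GetEndpointsToCheck; not part of the commit.
package main

import "fmt"

// requiredPorts lists the TCP ports a joining node must reach on an existing node,
// given whether each side is a controller.
func requiredPorts(joiningIsController, targetIsController bool) []int {
	switch {
	case targetIsController && joiningIsController:
		// a joining controller reaches kube-api, k0s-api, etcd and kubelet on every controller
		return []int{6443, 9443, 2380, 10250}
	case targetIsController:
		// every joining node reaches kube-api and k0s-api on every controller
		return []int{6443, 9443}
	case joiningIsController:
		// a joining controller also reaches the kubelet on every ready worker
		return []int{10250}
	default:
		// a joining worker needs nothing from existing workers
		return nil
	}
}

func main() {
	fmt.Println(requiredPorts(true, true))   // [6443 9443 2380 10250]
	fmt.Println(requiredPorts(false, true))  // [6443 9443]
	fmt.Println(requiredPorts(true, false))  // [10250]
	fmt.Println(requiredPorts(false, false)) // []
}
```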

pkg/embeddedcluster/node_join_test.go

Lines changed: 255 additions & 0 deletions
```diff
@@ -11,6 +11,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	kbclient "sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 )
 
@@ -99,3 +100,257 @@ func TestGenerateAddNodeCommand(t *testing.T) {
 	wantCommand = "sudo ./my-app join --airgap-bundle my-app.airgap 192.168.0.100:30000 token"
 	req.Equal(wantCommand, gotCommand)
 }
+
+func TestGetAllNodeIPAddresses(t *testing.T) {
+	scheme := runtime.NewScheme()
+	corev1.AddToScheme(scheme)
+	embeddedclusterv1beta1.AddToScheme(scheme)
+
+	tests := []struct {
+		name              string
+		roles             []string
+		kbClient          kbclient.Client
+		expectedEndpoints []string
+	}{
+		{
+			name:  "no nodes",
+			roles: []string{"some-role"},
+			kbClient: fake.NewClientBuilder().WithScheme(scheme).WithObjects(
+				&embeddedclusterv1beta1.Installation{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: time.Now().Format("20060102150405"),
+					},
+					Spec: embeddedclusterv1beta1.InstallationSpec{
+						BinaryName: "my-app",
+						Config: &embeddedclusterv1beta1.ConfigSpec{
+							Version: "v1.100.0",
+							Roles: embeddedclusterv1beta1.Roles{
+								Controller: embeddedclusterv1beta1.NodeRole{
+									Name: "controller-role",
+								},
+							},
+						},
+					},
+				},
+			).Build(),
+			expectedEndpoints: []string{},
+		},
+		{
+			name:  "worker node joining cluster with 1 controller and 1 worker",
+			roles: []string{"some-role"},
+			kbClient: fake.NewClientBuilder().WithScheme(scheme).WithObjects(
+				&embeddedclusterv1beta1.Installation{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: time.Now().Format("20060102150405"),
+					},
+					Spec: embeddedclusterv1beta1.InstallationSpec{
+						BinaryName: "my-app",
+						Config: &embeddedclusterv1beta1.ConfigSpec{
+							Version: "v1.100.0",
+							Roles: embeddedclusterv1beta1.Roles{
+								Controller: embeddedclusterv1beta1.NodeRole{
+									Name: "controller-role",
+								},
+							},
+						},
+					},
+				},
+				&corev1.Node{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "controller",
+						Labels: map[string]string{
+							"node-role.kubernetes.io/control-plane": "true",
+						},
+					},
+					Status: corev1.NodeStatus{
+						Conditions: []corev1.NodeCondition{
+							{
+								Type:   corev1.NodeReady,
+								Status: corev1.ConditionTrue,
+							},
+						},
+						Addresses: []corev1.NodeAddress{
+							{
+								Type:    corev1.NodeInternalIP,
+								Address: "192.168.0.100",
+							},
+						},
+					},
+				},
+				&corev1.Node{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "worker",
+						Labels: map[string]string{
+							"node-role.kubernetes.io/control-plane": "false",
+						},
+					},
+					Status: corev1.NodeStatus{
+						Conditions: []corev1.NodeCondition{
+							{
+								Type:   corev1.NodeReady,
+								Status: corev1.ConditionTrue,
+							},
+						},
+						Addresses: []corev1.NodeAddress{
+							{
+								Type:    corev1.NodeInternalIP,
+								Address: "192.168.0.101",
+							},
+						},
+					},
+				},
+			).Build(),
+			expectedEndpoints: []string{"192.168.0.100:6443", "192.168.0.100:9443"},
+		},
+		{
+			name:  "controller node joining cluster with 2 controller ready, 1 controller not ready, 1 worker ready, 1 worker not ready",
+			roles: []string{"controller-role"},
+			kbClient: fake.NewClientBuilder().WithScheme(scheme).WithObjects(
+				&embeddedclusterv1beta1.Installation{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: time.Now().Format("20060102150405"),
+					},
+					Spec: embeddedclusterv1beta1.InstallationSpec{
+						BinaryName: "my-app",
+						Config: &embeddedclusterv1beta1.ConfigSpec{
+							Version: "v1.100.0",
+							Roles: embeddedclusterv1beta1.Roles{
+								Controller: embeddedclusterv1beta1.NodeRole{
+									Name: "controller-role",
+								},
+							},
+						},
+					},
+				},
+				&corev1.Node{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "controller 1",
+						Labels: map[string]string{
+							"node-role.kubernetes.io/control-plane": "true",
+						},
+					},
+					Status: corev1.NodeStatus{
+						Conditions: []corev1.NodeCondition{
+							{
+								Type:   corev1.NodeReady,
+								Status: corev1.ConditionTrue,
+							},
+						},
+						Addresses: []corev1.NodeAddress{
+							{
+								Type:    corev1.NodeInternalIP,
+								Address: "192.168.0.100",
+							},
+						},
+					},
+				},
+				&corev1.Node{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "controller 2",
+						Labels: map[string]string{
+							"node-role.kubernetes.io/control-plane": "true",
+						},
+					},
+					Status: corev1.NodeStatus{
+						Conditions: []corev1.NodeCondition{
+							{
+								Type:   corev1.NodeReady,
+								Status: corev1.ConditionFalse,
+							},
+						},
+						Addresses: []corev1.NodeAddress{
+							{
+								Type:    corev1.NodeInternalIP,
+								Address: "192.168.0.101",
+							},
+						},
+					},
+				},
+				&corev1.Node{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "controller 3",
+						Labels: map[string]string{
+							"node-role.kubernetes.io/control-plane": "true",
+						},
+					},
+					Status: corev1.NodeStatus{
+						Conditions: []corev1.NodeCondition{
+							{
+								Type:   corev1.NodeReady,
+								Status: corev1.ConditionTrue,
+							},
+						},
+						Addresses: []corev1.NodeAddress{
+							{
+								Type:    corev1.NodeInternalIP,
+								Address: "192.168.0.102",
+							},
+						},
+					},
+				},
+				&corev1.Node{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:   "worker 1",
+						Labels: map[string]string{},
+					},
+					Status: corev1.NodeStatus{
+						Conditions: []corev1.NodeCondition{
+							{
+								Type:   corev1.NodeReady,
+								Status: corev1.ConditionTrue,
+							},
+						},
+						Addresses: []corev1.NodeAddress{
+							{
+								Type:    corev1.NodeInternalIP,
+								Address: "192.168.0.103",
+							},
+						},
+					},
+				},
+				&corev1.Node{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "worker 2",
+						Labels: map[string]string{
+							"node-role.kubernetes.io/control-plane": "false",
+						},
+					},
+					Status: corev1.NodeStatus{
+						Conditions: []corev1.NodeCondition{
+							{
+								Type:   corev1.NodeReady,
+								Status: corev1.ConditionFalse,
+							},
+						},
+						Addresses: []corev1.NodeAddress{
+							{
+								Type:    corev1.NodeInternalIP,
+								Address: "192.168.0.104",
+							},
+						},
+					},
+				},
+			).Build(),
+			expectedEndpoints: []string{
+				"192.168.0.100:6443",
+				"192.168.0.100:9443",
+				"192.168.0.100:2380",
+				"192.168.0.100:10250",
+				"192.168.0.102:6443",
+				"192.168.0.102:9443",
+				"192.168.0.102:2380",
+				"192.168.0.102:10250",
+				"192.168.0.103:10250",
+			},
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			req := require.New(t)
+			endpoints, err := GetEndpointsToCheck(context.Background(), test.kbClient, test.roles)
+			req.NoError(err)
+			req.Equal(test.expectedEndpoints, endpoints)
+		})
+	}
+}
```
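These cases can be run in isolation with `go test ./pkg/embeddedcluster -run TestGetAllNodeIPAddresses` (standard Go test tooling; the invocation itself is not part of the commit).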
