Skip to content

Commit

Permalink
Add Linode Provisioner
Browse files Browse the repository at this point in the history
Adds the Linode provisioner to the operator by integrating inletsctl, which supports Linode starting from version 0.5.5.

Signed-off-by: Ze Chen <[email protected]>
  • Loading branch information
zechen0 authored and alexellis committed Jul 14, 2020
1 parent e0438af commit 2682661
Show file tree
Hide file tree
Showing 12 changed files with 275 additions and 119 deletions.
56 changes: 56 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@ Operator cloud host provisioning:
- [x] Provision to Scaleway
- [x] Provision to GCP
- [x] Provision to AWS EC2
- [x] Provision to Linode
- [x] Publish stand-alone [Go provisioning library/SDK](https://github.com/inlets/inletsctl/tree/master/pkg/provision)

With [`inlets-pro`](https://github.com/inlets/inlets-pro) configured, you get the following additional benefits:
Expand Down Expand Up @@ -226,6 +227,61 @@ helm upgrade inlets-operator --install inlets/inlets-operator \
--set provider=gce,zone=us-central1-a,projectID=$PROJECTID
```

## Running in-cluster, using Linode for the exit node

Install using helm:
```bash
kubectl apply -f ./artifacts/crds/

# Create a secret to store the Linode API access key
kubectl create secret generic inlets-access-key --from-literal inlets-access-key=<Linode API Access Key>

# Add and update the inlets-operator helm repo
helm repo add inlets https://inlets.github.io/inlets-operator/

helm repo update

# Install inlets-operator with the required fields
helm upgrade inlets-operator --install inlets/inlets-operator \
--set provider=linode,region=us-east
```

You can also install the inlets-operator with a single command using [arkade](https://get-arkade.dev/); arkade runs against any Kubernetes cluster.

Install with inlets PRO:

```bash
arkade install inlets-operator \
--provider linode \
--region us-east \
--access-key <Linode API Access Key> \
--license $(cat $HOME/inlets-pro-license.txt)
```

Install with inlets OSS:

```bash
arkade install inlets-operator \
--provider linode \
--region us-east \
--access-key <Linode API Access Key>
```

You can also install using kubectl without helm: (Change `-provider` and `-region` in `./artifacts/operator.yaml`)

```bash
# Create a secret to store the access token

kubectl create secret generic inlets-access-key \
--from-literal inlets-access-key=<Linode API Access Key>

kubectl apply -f ./artifacts/crds/

# Apply the operator deployment and RBAC role
kubectl apply -f ./artifacts/operator-rbac.yaml
kubectl apply -f ./artifacts/operator.yaml
```

## Expose a service with a LoadBalancer

The LoadBalancer type is usually provided by a cloud controller, but when that is not available, then you can use the inlets-operator to get a public IP and ingress. The free OSS version of inlets provides a HTTP tunnel, inlets PRO can provide TCP and full functionality to an IngressController.
Expand Down
16 changes: 15 additions & 1 deletion chart/inlets-operator/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ helm upgrade inlets-operator --install inlets/inlets-operator \

```sh
helm upgrade inlets-operator --install inlets/inlets-operator \
--set provider=packet,region=ams1,projectID=PROJECTID,inletsProLicense=WT_GOES_HERE
--set provider=packet,region=ams1,projectID=PROJECTID,inletsProLicense=JWT_GOES_HERE
```

### Scaleway with inlets OSS
Expand All @@ -80,6 +80,20 @@ helm upgrade inlets-operator --install inlets/inlets-operator \
--set provider=scaleway,region=ams1,organizationID=ORGANIZATIONID
```

### Linode with inlets OSS

```sh
helm upgrade inlets-operator --install inlets/inlets-operator \
--set provider=linode,region=us-east
```

### Linode with inlets-pro

```sh
helm upgrade inlets-operator --install inlets/inlets-operator \
--set provider=linode,region=us-east,inletsProLicense=JWT_GOES_HERE
```


## Chart parameters

Expand Down
71 changes: 61 additions & 10 deletions controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
package main

import (
"context"
"encoding/base64"
"fmt"
"log"
Expand Down Expand Up @@ -152,6 +153,9 @@ func NewController(
case "civo":
provisioner, _ = provision.NewCivoProvisioner(controller.infraConfig.GetAccessKey())
break
case "linode":
provisioner, _ = provision.NewLinodeProvisioner(controller.infraConfig.GetAccessKey())
break
}

if provisioner != nil {
Expand Down Expand Up @@ -362,7 +366,7 @@ func (c *Controller) syncHandler(key string) error {

ops := metav1.GetOptions{}
name := service.Name + "-tunnel"
found, err := tunnels.Get(name, ops)
found, err := tunnels.Get(context.Background(), name, ops)

if errors.IsNotFound(err) {
if manageService(*c, *service) {
Expand Down Expand Up @@ -390,7 +394,8 @@ func (c *Controller) syncHandler(key string) error {
},
}

_, err := tunnels.Create(tunnel)
ops := metav1.CreateOptions{}
_, err := tunnels.Create(context.Background(), tunnel, ops)

if err != nil {
log.Printf("Error creating tunnel: %s", err.Error())
Expand All @@ -402,7 +407,7 @@ func (c *Controller) syncHandler(key string) error {
if manageService(*c, *service) == false {
log.Printf("Removing tunnel: %s\n", found.Name)

err := tunnels.Delete(found.Name, &metav1.DeleteOptions{})
err := tunnels.Delete(context.Background(), found.Name, metav1.DeleteOptions{})

if err != nil {
log.Printf("Error deleting tunnel: %s", err.Error())
Expand Down Expand Up @@ -430,6 +435,7 @@ func (c *Controller) syncHandler(key string) error {

var id string

log.Printf("Provisioning started with provider:%s host:%s\n", c.infraConfig.Provider, tunnel.Name)
start := time.Now()
if c.infraConfig.Provider == "packet" {

Expand Down Expand Up @@ -566,7 +572,27 @@ func (c *Controller) syncHandler(key string) error {
return err
}
id = res.ID
} else if c.infraConfig.Provider == "linode" {
provisioner, _ := provision.NewLinodeProvisioner(c.infraConfig.GetAccessKey())

userData := makeUserdata(tunnel.Spec.AuthToken, c.infraConfig.UsePro(), tunnel.Spec.ServiceName)

res, err := provisioner.Provision(provision.BasicHost{
Name: tunnel.Name,
OS: "linode/ubuntu16.04lts", // https://api.linode.com/v4/images
Plan: "g6-nanode-1", // https://api.linode.com/v4/linode/types
Region: c.infraConfig.Region,
UserData: userData,
Additional: map[string]string{},
})

if err != nil {
return err
}
id = res.ID

} else {
return fmt.Errorf("unsupported provider: %s", c.infraConfig.Provider)
}

log.Printf("Provisioning call took: %fs\n", time.Since(start).Seconds())
Expand Down Expand Up @@ -727,12 +753,37 @@ func (c *Controller) syncHandler(key string) error {
}
}
}
} else if c.infraConfig.Provider == "linode" {
provisioner, _ := provision.NewLinodeProvisioner(c.infraConfig.GetAccessKey())

host, err := provisioner.Status(tunnel.Status.HostID)

if err != nil {
return err
}

if host.Status == provision.ActiveStatus {
if host.IP != "" {
err := c.updateTunnelProvisioningStatus(tunnel, provision.ActiveStatus, host.ID, host.IP)
if err != nil {
return err
}

err = c.updateService(tunnel, host.IP)
if err != nil {
log.Printf("Error updating service: %s, %s", tunnel.Spec.ServiceName, err.Error())
return fmt.Errorf("tunnel update error %s", err)
}
}
}
} else {
return fmt.Errorf("unsupported provider: %s", c.infraConfig.Provider)
}
break
case provision.ActiveStatus:
if tunnel.Spec.ClientDeploymentRef == nil {
get := metav1.GetOptions{}
service, getServiceErr := c.kubeclientset.CoreV1().Services(tunnel.Namespace).Get(tunnel.Spec.ServiceName, get)
service, getServiceErr := c.kubeclientset.CoreV1().Services(tunnel.Namespace).Get(context.Background(), tunnel.Spec.ServiceName, get)

if getServiceErr != nil {
return getServiceErr
Expand All @@ -758,7 +809,7 @@ func (c *Controller) syncHandler(key string) error {

deployment, createDeployErr := c.kubeclientset.AppsV1().
Deployments(tunnel.Namespace).
Create(client)
Create(context.Background(), client, metav1.CreateOptions{})

if createDeployErr != nil {
log.Println(createDeployErr)
Expand All @@ -771,7 +822,7 @@ func (c *Controller) syncHandler(key string) error {

_, updateErr := c.operatorclientset.InletsV1alpha1().
Tunnels(tunnel.Namespace).
Update(tunnel)
Update(context.Background(), tunnel, metav1.UpdateOptions{})

if updateErr != nil {
log.Println(updateErr)
Expand Down Expand Up @@ -880,7 +931,7 @@ func makeClient(tunnel *inletsv1alpha1.Tunnel, targetPort int32, clientImage str

func (c *Controller) updateService(tunnel *inletsv1alpha1.Tunnel, ip string) error {
get := metav1.GetOptions{}
res, err := c.kubeclientset.CoreV1().Services(tunnel.Namespace).Get(tunnel.Spec.ServiceName, get)
res, err := c.kubeclientset.CoreV1().Services(tunnel.Namespace).Get(context.Background(), tunnel.Spec.ServiceName, get)
if err != nil {
return err
}
Expand All @@ -899,7 +950,7 @@ func (c *Controller) updateService(tunnel *inletsv1alpha1.Tunnel, ip string) err
copy.Spec.ExternalIPs = append(copy.Spec.ExternalIPs, ip)
}

res, err = c.kubeclientset.CoreV1().Services(tunnel.Namespace).Update(copy)
res, err = c.kubeclientset.CoreV1().Services(tunnel.Namespace).Update(context.Background(), copy, metav1.UpdateOptions{})
if err != nil {
return err
}
Expand All @@ -911,7 +962,7 @@ func (c *Controller) updateService(tunnel *inletsv1alpha1.Tunnel, ip string) err
copy.Status.LoadBalancer.Ingress[i] = corev1.LoadBalancerIngress{IP: ip}
}

_, err = c.kubeclientset.CoreV1().Services(tunnel.Namespace).UpdateStatus(copy)
_, err = c.kubeclientset.CoreV1().Services(tunnel.Namespace).UpdateStatus(context.Background(), copy, metav1.UpdateOptions{})
return err
}

Expand All @@ -923,7 +974,7 @@ func (c *Controller) updateTunnelProvisioningStatus(tunnel *inletsv1alpha1.Tunne
tunnelCopy.Status.HostID = id
tunnelCopy.Status.HostIP = ip

_, err := c.operatorclientset.InletsV1alpha1().Tunnels(tunnel.Namespace).UpdateStatus(tunnelCopy)
_, err := c.operatorclientset.InletsV1alpha1().Tunnels(tunnel.Namespace).UpdateStatus(context.Background(), tunnelCopy, metav1.UpdateOptions{})
return err
}

Expand Down
11 changes: 5 additions & 6 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -6,14 +6,13 @@ go 1.13

require (
github.com/aws/aws-sdk-go v1.27.3 // indirect
github.com/inlets/inletsctl v0.0.0-20200211114314-aab68519494e
github.com/inlets/inletsctl v0.0.0-20200630123138-2af07d807845
github.com/sethvargo/go-password v0.1.3

k8s.io/api v0.17.0
k8s.io/apimachinery v0.17.1-beta.0
k8s.io/client-go v0.17.0
k8s.io/code-generator v0.17.0
k8s.io/api v0.18.3
k8s.io/apimachinery v0.18.3
k8s.io/client-go v0.18.3
k8s.io/code-generator v0.18.5
k8s.io/gengo v0.0.0-20200127102705-1e9b17e831be // indirect
k8s.io/klog v1.0.0
k8s.io/kube-openapi v0.0.0-20200130172213-cdac1c71ff9f // indirect
)
Loading

0 comments on commit 2682661

Please sign in to comment.