From 8bfe2cfb8020fd26b2ec0f8275abba8dbe0f64cc Mon Sep 17 00:00:00 2001 From: Beza Date: Sun, 21 Dec 2025 19:47:11 -0300 Subject: [PATCH 01/31] Standardize code blocks by removing leading '$' from command examples - Removes the '$' character from the beginning of commands in code blocks and terminal examples throughout the documentation. - Improves copy-paste usability and prevents errors when running commands. - Ensures consistency and follows documentation best practices for shell commands. Signed-off-by: Beza --- .../content/2-edge/reference/cli/check.md | 2 +- .../content/2-edge/reference/iptables.md | 2 +- .../configuring-dynamic-request-routing.md | 6 +- .../tasks/configuring-per-route-policy.md | 18 +-- .../2-edge/tasks/managing-egress-traffic.md | 10 +- .../tasks/multicluster-using-statefulsets.md | 46 +++--- .../content/2-edge/tasks/multicluster.md | 4 +- .../2-edge/tasks/restricting-access.md | 4 +- .../2-edge/tasks/securing-linkerd-tap.md | 16 +- .../content/2-edge/tasks/troubleshooting.md | 108 +++++++------- .../content/2.10/reference/cli/check.md | 2 +- .../2.10/tasks/getting-per-route-metrics.md | 6 +- .../2.10/tasks/securing-your-cluster.md | 16 +- .../content/2.10/tasks/troubleshooting.md | 138 +++++++++--------- .../content/2.11/reference/cli/check.md | 2 +- linkerd.io/content/2.11/reference/iptables.md | 2 +- .../2.11/tasks/getting-per-route-metrics.md | 6 +- .../tasks/multicluster-using-statefulsets.md | 48 +++--- .../content/2.11/tasks/restricting-access.md | 12 +- .../2.11/tasks/securing-your-cluster.md | 16 +- .../content/2.11/tasks/troubleshooting.md | 138 +++++++++--------- .../content/2.12/reference/cli/check.md | 2 +- linkerd.io/content/2.12/reference/iptables.md | 2 +- .../tasks/configuring-per-route-policy.md | 18 +-- .../2.12/tasks/getting-per-route-metrics.md | 6 +- .../tasks/multicluster-using-statefulsets.md | 48 +++--- .../content/2.12/tasks/restricting-access.md | 4 +- .../2.12/tasks/securing-linkerd-tap.md | 16 +- 
.../content/2.12/tasks/troubleshooting.md | 114 +++++++-------- linkerd.io/content/2.12/tasks/upgrade.md | 20 +-- .../content/2.13/reference/cli/check.md | 2 +- linkerd.io/content/2.13/reference/iptables.md | 2 +- .../configuring-dynamic-request-routing.md | 6 +- .../tasks/configuring-per-route-policy.md | 18 +-- .../2.13/tasks/getting-per-route-metrics.md | 6 +- .../tasks/multicluster-using-statefulsets.md | 48 +++--- .../content/2.13/tasks/restricting-access.md | 4 +- .../2.13/tasks/securing-linkerd-tap.md | 16 +- .../content/2.13/tasks/troubleshooting.md | 114 +++++++-------- linkerd.io/content/2.13/tasks/upgrade.md | 20 +-- .../content/2.14/reference/cli/check.md | 2 +- linkerd.io/content/2.14/reference/iptables.md | 2 +- .../configuring-dynamic-request-routing.md | 6 +- .../tasks/configuring-per-route-policy.md | 18 +-- .../2.14/tasks/getting-per-route-metrics.md | 6 +- .../tasks/multicluster-using-statefulsets.md | 48 +++--- .../content/2.14/tasks/restricting-access.md | 4 +- .../2.14/tasks/securing-linkerd-tap.md | 16 +- .../content/2.14/tasks/troubleshooting.md | 114 +++++++-------- linkerd.io/content/2.14/tasks/upgrade.md | 20 +-- .../content/2.15/reference/cli/check.md | 2 +- linkerd.io/content/2.15/reference/iptables.md | 2 +- .../configuring-dynamic-request-routing.md | 6 +- .../tasks/configuring-per-route-policy.md | 18 +-- .../2.15/tasks/getting-per-route-metrics.md | 6 +- .../tasks/multicluster-using-statefulsets.md | 48 +++--- .../content/2.15/tasks/restricting-access.md | 4 +- .../2.15/tasks/securing-linkerd-tap.md | 16 +- .../content/2.15/tasks/troubleshooting.md | 112 +++++++------- linkerd.io/content/2.15/tasks/upgrade.md | 20 +-- .../content/2.16/reference/cli/check.md | 2 +- linkerd.io/content/2.16/reference/iptables.md | 2 +- .../configuring-dynamic-request-routing.md | 6 +- .../tasks/configuring-per-route-policy.md | 18 +-- .../tasks/multicluster-using-statefulsets.md | 48 +++--- .../content/2.16/tasks/restricting-access.md | 4 +- 
.../2.16/tasks/securing-linkerd-tap.md | 16 +- .../content/2.16/tasks/troubleshooting.md | 112 +++++++------- linkerd.io/content/2.16/tasks/upgrade.md | 20 +-- .../content/2.17/reference/cli/check.md | 2 +- linkerd.io/content/2.17/reference/iptables.md | 2 +- .../configuring-dynamic-request-routing.md | 6 +- .../tasks/configuring-per-route-policy.md | 18 +-- .../2.17/tasks/managing-egress-traffic.md | 10 +- .../tasks/multicluster-using-statefulsets.md | 48 +++--- .../content/2.17/tasks/restricting-access.md | 4 +- .../2.17/tasks/securing-linkerd-tap.md | 16 +- .../content/2.17/tasks/troubleshooting.md | 112 +++++++------- linkerd.io/content/2.17/tasks/upgrade.md | 20 +-- .../content/2.18/reference/cli/check.md | 2 +- linkerd.io/content/2.18/reference/iptables.md | 2 +- .../configuring-dynamic-request-routing.md | 6 +- .../tasks/configuring-per-route-policy.md | 18 +-- .../2.18/tasks/managing-egress-traffic.md | 10 +- .../tasks/multicluster-using-statefulsets.md | 48 +++--- linkerd.io/content/2.18/tasks/multicluster.md | 4 +- .../content/2.18/tasks/restricting-access.md | 4 +- .../2.18/tasks/securing-linkerd-tap.md | 16 +- .../content/2.18/tasks/troubleshooting.md | 112 +++++++------- .../content/2.19/reference/cli/check.md | 2 +- linkerd.io/content/2.19/reference/iptables.md | 2 +- .../configuring-dynamic-request-routing.md | 6 +- .../tasks/configuring-per-route-policy.md | 18 +-- .../2.19/tasks/managing-egress-traffic.md | 10 +- .../tasks/multicluster-using-statefulsets.md | 48 +++--- linkerd.io/content/2.19/tasks/multicluster.md | 4 +- .../content/2.19/tasks/restricting-access.md | 4 +- .../2.19/tasks/securing-linkerd-tap.md | 16 +- .../content/2.19/tasks/troubleshooting.md | 108 +++++++------- .../index.md | 8 +- .../index.md | 6 +- .../index.md | 14 +- .../1007-linkerd-distributed-tracing/index.md | 2 +- .../2024/1015-edge-release-roundup/index.md | 2 +- .../0725-tilt-linkerd-nginx-part-2/index.md | 2 +- 105 files changed, 1239 insertions(+), 1239 deletions(-) 
diff --git a/linkerd.io/content/2-edge/reference/cli/check.md b/linkerd.io/content/2-edge/reference/cli/check.md index 7cd61cd237..67a2486908 100644 --- a/linkerd.io/content/2-edge/reference/cli/check.md +++ b/linkerd.io/content/2-edge/reference/cli/check.md @@ -12,7 +12,7 @@ for a full list of all the possible checks, what they do and how to fix them. ## Example output ```bash -$ linkerd check +linkerd check kubernetes-api -------------- √ can initialize the client diff --git a/linkerd.io/content/2-edge/reference/iptables.md b/linkerd.io/content/2-edge/reference/iptables.md index 67a7ea89de..9b4d229a59 100644 --- a/linkerd.io/content/2-edge/reference/iptables.md +++ b/linkerd.io/content/2-edge/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -$ kubectl -n logs linkerd-init +kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2-edge/tasks/configuring-dynamic-request-routing.md b/linkerd.io/content/2-edge/tasks/configuring-dynamic-request-routing.md index 004b50ded6..a44d12a1a5 100644 --- a/linkerd.io/content/2-edge/tasks/configuring-dynamic-request-routing.md +++ b/linkerd.io/content/2-edge/tasks/configuring-dynamic-request-routing.md @@ -67,7 +67,7 @@ Requests to `/echo` on port 9898 to the frontend pod will get forwarded the pod pointed by the Service `backend-a-podinfo`: ```bash -$ curl -sX POST localhost:9898/echo \ +curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' PODINFO_UI_MESSAGE=A backend @@ -132,7 +132,7 @@ the `backend-a-podinfo` Service. The previous requests should still reach `backend-a-podinfo` only: ```bash -$ curl -sX POST localhost:9898/echo \ +curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. 
backend' PODINFO_UI_MESSAGE=A backend @@ -142,7 +142,7 @@ But if we add the `x-request-id: alternative` header, they get routed to `backend-b-podinfo`: ```bash -$ curl -sX POST \ +curl -sX POST \ -H 'x-request-id: alternative' \ localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' diff --git a/linkerd.io/content/2-edge/tasks/configuring-per-route-policy.md b/linkerd.io/content/2-edge/tasks/configuring-per-route-policy.md index 2c495e1f20..aaa3a9b43d 100644 --- a/linkerd.io/content/2-edge/tasks/configuring-per-route-policy.md +++ b/linkerd.io/content/2-edge/tasks/configuring-per-route-policy.md @@ -30,7 +30,7 @@ haven't already done this. Inject and install the Books demo application: ```bash -$ kubectl create ns booksapp && \ +kubectl create ns booksapp && \ curl --proto '=https' --tlsv1.2 -sSfL https://run.linkerd.io/booksapp.yml \ | linkerd inject - \ | kubectl -n booksapp apply -f - @@ -44,21 +44,21 @@ run in the `booksapp` namespace. Confirm that the Linkerd data plane was injected successfully: ```bash -$ linkerd check -n booksapp --proxy -o short +linkerd check -n booksapp --proxy -o short ``` You can take a quick look at all the components that were added to your cluster by running: ```bash -$ kubectl -n booksapp get all +kubectl -n booksapp get all ``` Once the rollout has completed successfully, you can access the app itself by port-forwarding `webapp` locally: ```bash -$ kubectl -n booksapp port-forward svc/webapp 7000 & +kubectl -n booksapp port-forward svc/webapp 7000 & ``` Open [http://localhost:7000/](http://localhost:7000/) in your browser to see the @@ -87,7 +87,7 @@ First, let's run the `linkerd viz authz` command to list the authorization resources that currently exist for the `authors` deployment: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated 
default/all-unauthenticated 0.0rps 70.31% 8.1rps 1ms 43ms 49ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.3rps 1ms 1ms 1ms @@ -124,7 +124,7 @@ Now that we've defined a [`Server`] for the authors `Deployment`, we can run the currently unauthorized: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default authors-server 9.5rps 0.00% 0.0rps 0ms 0ms 0ms probe authors-server default/probe 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -312,7 +312,7 @@ network (0.0.0.0). Running `linkerd viz authz` again, we can now see that our new policies exist: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 2ms 2ms 2ms authors-probe-route authors-server authorizationpolicy/authors-probe-policy 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -383,7 +383,7 @@ requests, but we haven't _authorized_ requests to that route. 
Running the requests to `authors-modify-route`: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy - - - - - - authors-modify-route authors-server 9.7rps 0.00% 0.0rps 0ms 0ms 0ms @@ -442,7 +442,7 @@ Running the `linkerd viz authz` command one last time, we now see that all traffic is authorized: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 0ms 0ms 0ms authors-modify-route authors-server authorizationpolicy/authors-modify-policy 0.0rps 100.00% 0.0rps 0ms 0ms 0ms diff --git a/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md b/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md index a43eadb61a..a4a7155edd 100644 --- a/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md @@ -70,7 +70,7 @@ Now SSH into the client container and start generating some external traffic: ```bash kubectl -n egress-test exec -it client -c client -- sh -$ while sleep 1; do curl -s http://httpbin.org/get ; done +while sleep 1; do curl -s http://httpbin.org/get ; done ``` In a separate shell, you can use the Linkerd diagnostics command to visualize @@ -235,7 +235,7 @@ Interestingly enough though, if we go back to our client shell and we try to initiate HTTPS traffic to the same service, it will not be allowed: ```bash -~ $ curl -v https://httpbin.org/get +curl -v https://httpbin.org/get curl: (35) TLS connect error: error:00000000:lib(0)::reason(0) ``` @@ -458,7 +458,7 @@ Now let's verify all works as expected: ```bash # plaintext traffic goes as expected to the /get 
path -$ curl http://httpbin.org/get +curl http://httpbin.org/get { "args": {}, "headers": { @@ -472,14 +472,14 @@ $ curl http://httpbin.org/get } # encrypted traffic can target all paths and hosts -$ curl https://httpbin.org/ip +curl https://httpbin.org/ip { "origin": "51.116.126.217" } # arbitrary unencrypted traffic goes to the internal service -$ curl http://google.com +curl http://google.com { "requestUID": "in:http-sid:terminus-grpc:-1-h1:80-190120723", "payload": "You cannot go there right now"} diff --git a/linkerd.io/content/2-edge/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2-edge/tasks/multicluster-using-statefulsets.md index 81969979a0..df55b3ee41 100644 --- a/linkerd.io/content/2-edge/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2-edge/tasks/multicluster-using-statefulsets.md @@ -48,8 +48,8 @@ The first step is to clone the demo repository on your local machine. ```sh # clone example repository -$ git clone git@github.com:linkerd/l2d-k3d-statefulset.git -$ cd l2d-k3d-statefulset +git clone git@github.com:linkerd/l2d-k3d-statefulset.git +cd l2d-k3d-statefulset ``` The second step consists of creating two `k3d` clusters named `east` and `west`, @@ -60,10 +60,10 @@ everything. ```sh # create k3d clusters -$ ./create.sh +./create.sh # list the clusters -$ k3d cluster list +k3d cluster list NAME SERVERS AGENTS LOADBALANCER east 1/1 0/0 true west 1/1 0/0 true @@ -77,10 +77,10 @@ controllers and links are generated for both clusters. ```sh # Install Linkerd and multicluster, output to check should be a success -$ ./install.sh +./install.sh # Next, link the two clusters together -$ ./link.sh +./link.sh ``` Perfect! If you've made it this far with no errors, then it's a good sign. In @@ -100,17 +100,17 @@ communication. 
First, we will deploy our pods and services: ```sh # deploy services and mesh namespaces -$ ./deploy.sh +./deploy.sh # verify both clusters # # verify east -$ kubectl --context=k3d-east get pods +kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 7s # verify west has headless service -$ kubectl --context=k3d-west get services +kubectl --context=k3d-west get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 10m nginx-svc ClusterIP None 80/TCP 8s @@ -118,7 +118,7 @@ nginx-svc ClusterIP None 80/TCP 8s # verify west has statefulset # # this may take a while to come up -$ kubectl --context=k3d-west get pods +kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 53s nginx-set-1 2/2 Running 0 43s @@ -129,7 +129,7 @@ Before we go further, let's have a look at the endpoints object for the `nginx-svc`: ```sh -$ kubectl --context=k3d-west get endpoints nginx-svc -o yaml +kubectl --context=k3d-west get endpoints nginx-svc -o yaml ... subsets: - addresses: @@ -169,15 +169,15 @@ would get an answer back. We can test this out by applying the curl pod to the `west` cluster: ```sh -$ kubectl --context=k3d-west apply -f east/curl.yml -$ kubectl --context=k3d-west get pods +kubectl --context=k3d-west apply -f east/curl.yml +kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 5m8s nginx-set-1 2/2 Running 0 4m58s nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s -$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh +kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh /$ # prompt for curl pod ``` @@ -185,7 +185,7 @@ If we now curl one of these instances, we will get back a response. 
```sh # exec'd on the pod -/ $ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local +/ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local " @@ -217,10 +217,10 @@ Now, let's do the same, but this time from the `east` cluster. We will first export the service. ```sh -$ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" +kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled -$ kubectl --context=k3d-east get services +kubectl --context=k3d-east get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 20h nginx-svc-west ClusterIP None 80/TCP 29s @@ -234,7 +234,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname will point to one of the services we see above: ```sh -$ kubectl --context=k3d-east get endpoints nginx-svc-k3d-west -o yaml +kubectl --context=k3d-east get endpoints nginx-svc-k3d-west -o yaml subsets: - addresses: - hostname: nginx-set-0 @@ -250,17 +250,17 @@ cluster (`west`), will be mirrored as a clusterIP service. We will see in a second why this matters. ```sh -$ kubectl --context=k3d-east get pods +kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -$ kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh +kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. # -/ $ curl nginx-set-0.nginx-svc-k3d-west.default.svc.east.cluster.local +/ curl nginx-set-0.nginx-svc-k3d-west.default.svc.east.cluster.local @@ -328,8 +328,8 @@ validation. 
To clean-up, you can remove both clusters entirely using the k3d CLI: ```sh -$ k3d cluster delete east +k3d cluster delete east cluster east deleted -$ k3d cluster delete west +k3d cluster delete west cluster west deleted ``` diff --git a/linkerd.io/content/2-edge/tasks/multicluster.md b/linkerd.io/content/2-edge/tasks/multicluster.md index 3a80b3f3ed..2779b7616a 100644 --- a/linkerd.io/content/2-edge/tasks/multicluster.md +++ b/linkerd.io/content/2-edge/tasks/multicluster.md @@ -506,9 +506,9 @@ To cleanup the multicluster control plane, you can run: ```bash # Delete the link CR -$ kubectl --context=west -n linkerd-multicluster delete links east +kubectl --context=west -n linkerd-multicluster delete links east # Delete the test namespace and uninstall multicluster -$ for ctx in west east; do \ +for ctx in west east; do \ kubectl --context=${ctx} delete ns test; \ linkerd --context=${ctx} multicluster uninstall | kubectl --context=${ctx} delete -f - ; \ done diff --git a/linkerd.io/content/2-edge/tasks/restricting-access.md b/linkerd.io/content/2-edge/tasks/restricting-access.md index 5654518600..c9850725f7 100644 --- a/linkerd.io/content/2-edge/tasks/restricting-access.md +++ b/linkerd.io/content/2-edge/tasks/restricting-access.md @@ -21,9 +21,9 @@ haven't already done this. Inject and install the Emojivoto application: ```bash -$ linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - +linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - ... -$ linkerd check -n emojivoto --proxy -o short +linkerd check -n emojivoto --proxy -o short ... 
``` diff --git a/linkerd.io/content/2-edge/tasks/securing-linkerd-tap.md b/linkerd.io/content/2-edge/tasks/securing-linkerd-tap.md index 8a802c890c..639f81692f 100644 --- a/linkerd.io/content/2-edge/tasks/securing-linkerd-tap.md +++ b/linkerd.io/content/2-edge/tasks/securing-linkerd-tap.md @@ -60,7 +60,7 @@ kubectl auth can-i watch deployments.tap.linkerd.io -n emojivoto --as $(whoami) You can also use the Linkerd CLI's `--as` flag to confirm: ```bash -$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) Cannot connect to Linkerd Viz: namespaces is forbidden: User "XXXX" cannot list resource "namespaces" in API group "" at the cluster scope Validate the install with: linkerd viz check ... @@ -77,7 +77,7 @@ To enable tap access to all resources in all namespaces, you may bind your user to the `linkerd-linkerd-tap-admin` ClusterRole, installed by default: ```bash -$ kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin +kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin Name: linkerd-linkerd-viz-tap-admin Labels: component=tap linkerd.io/extension=viz @@ -109,7 +109,7 @@ kubectl create clusterrolebinding \ You can verify you now have tap access with: ```bash -$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) req id=3:0 proxy=in src=10.244.0.1:37392 dst=10.244.0.13:9996 tls=not_provided_by_remote :method=GET :authority=10.244.0.13:9996 :path=/ping ... ``` @@ -143,14 +143,14 @@ Because GCloud provides this additional level of access, there are cases where not. 
To validate this, check whether your GCloud user has Tap access: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces yes ``` And then validate whether your RBAC user has Tap access: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) no - no RBAC policy matched ``` @@ -187,14 +187,14 @@ privileges necessary to tap resources. To confirm: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web yes ``` This access is enabled via a `linkerd-linkerd-viz-web-admin` ClusterRoleBinding: ```bash -$ kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin +kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin Name: linkerd-linkerd-viz-web-admin Labels: component=web linkerd.io/extensions=viz @@ -227,6 +227,6 @@ kubectl delete clusterrolebindings/linkerd-linkerd-viz-web-admin To confirm: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web no ``` diff --git a/linkerd.io/content/2-edge/tasks/troubleshooting.md b/linkerd.io/content/2-edge/tasks/troubleshooting.md index baaa71e206..5586441c26 100644 --- a/linkerd.io/content/2-edge/tasks/troubleshooting.md +++ b/linkerd.io/content/2-edge/tasks/troubleshooting.md @@ -230,7 +230,7 @@ Example failure: Ensure the Linkerd ClusterRoles exist: ```bash -$ kubectl get clusterroles | grep linkerd +kubectl get clusterroles | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -240,7 +240,7 
@@ linkerd-policy 9d Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +kubectl auth can-i create clusterroles yes ``` @@ -257,7 +257,7 @@ Example failure: Ensure the Linkerd ClusterRoleBindings exist: ```bash -$ kubectl get clusterrolebindings | grep linkerd +kubectl get clusterrolebindings | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -267,7 +267,7 @@ linkerd-destination-policy 9d Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create clusterrolebindings +kubectl auth can-i create clusterrolebindings yes ``` @@ -284,7 +284,7 @@ Example failure: Ensure the Linkerd ServiceAccounts exist: ```bash -$ kubectl -n linkerd get serviceaccounts +kubectl -n linkerd get serviceaccounts NAME SECRETS AGE default 1 14m linkerd-destination 1 14m @@ -297,7 +297,7 @@ Also ensure you have permission to create ServiceAccounts in the Linkerd namespace: ```bash -$ kubectl -n linkerd auth can-i create serviceaccounts +kubectl -n linkerd auth can-i create serviceaccounts yes ``` @@ -314,7 +314,7 @@ Example failure: Ensure the Linkerd CRD exists: ```bash -$ kubectl get customresourcedefinitions +kubectl get customresourcedefinitions NAME CREATED AT serviceprofiles.linkerd.io 2019-04-25T21:47:31Z ``` @@ -322,7 +322,7 @@ serviceprofiles.linkerd.io 2019-04-25T21:47:31Z Also ensure you have permission to create CRDs: ```bash -$ kubectl auth can-i create customresourcedefinitions +kubectl auth can-i create customresourcedefinitions yes ``` @@ -339,14 +339,14 @@ Example failure: Ensure the Linkerd MutatingWebhookConfigurations exists: ```bash -$ kubectl get mutatingwebhookconfigurations | grep linkerd +kubectl get mutatingwebhookconfigurations | grep linkerd linkerd-proxy-injector-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create MutatingWebhookConfigurations: ```bash -$ kubectl auth can-i create 
mutatingwebhookconfigurations +kubectl auth can-i create mutatingwebhookconfigurations yes ``` @@ -363,14 +363,14 @@ Example failure: Ensure the Linkerd ValidatingWebhookConfiguration exists: ```bash -$ kubectl get validatingwebhookconfigurations | grep linkerd +kubectl get validatingwebhookconfigurations | grep linkerd linkerd-sp-validator-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create ValidatingWebhookConfigurations: ```bash -$ kubectl auth can-i create validatingwebhookconfigurations +kubectl auth can-i create validatingwebhookconfigurations yes ``` @@ -418,7 +418,7 @@ Example failure: Ensure the Linkerd ConfigMap exists: ```bash -$ kubectl -n linkerd get configmap/linkerd-config +kubectl -n linkerd get configmap/linkerd-config NAME DATA AGE linkerd-config 3 61m ``` @@ -426,7 +426,7 @@ linkerd-config 3 61m Also ensure you have permission to create ConfigMaps: ```bash -$ kubectl -n linkerd auth can-i create configmap +kubectl -n linkerd auth can-i create configmap yes ``` @@ -780,7 +780,7 @@ Example failure: Verify the state of the control plane pods with: ```bash -$ kubectl -n linkerd get po +kubectl -n linkerd get po NAME READY STATUS RESTARTS AGE linkerd-destination-5fd7b5d466-szgqm 2/2 Running 1 12m linkerd-identity-54df78c479-hbh5m 2/2 Running 0 12m @@ -862,7 +862,7 @@ Ensure you can connect to the Linkerd version check endpoint from the environment the `linkerd` cli is running: ```bash -$ curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" +curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" {"stable":"stable-2.1.0","edge":"edge-19.1.2"} ``` @@ -961,7 +961,7 @@ normally. Example failure: ```bash -$ linkerd check --proxy --namespace foo +linkerd check --proxy --namespace foo ... 
× data plane namespace exists The "foo" namespace does not exist @@ -1147,7 +1147,7 @@ Example error: Ensure that the linkerd-cni-config ConfigMap exists in the CNI namespace: ```bash -$ kubectl get cm linkerd-cni-config -n linkerd-cni +kubectl get cm linkerd-cni-config -n linkerd-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1155,7 +1155,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create ConfigMaps: ```bash -$ kubectl auth can-i create ConfigMaps +kubectl auth can-i create ConfigMaps yes ``` @@ -1172,7 +1172,7 @@ Example error: Ensure that the cluster role exists: ```bash -$ kubectl get clusterrole linkerd-cni +kubectl get clusterrole linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1180,7 +1180,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create ClusterRoles +kubectl auth can-i create ClusterRoles yes ``` @@ -1197,7 +1197,7 @@ Example error: Ensure that the cluster role binding exists: ```bash -$ kubectl get clusterrolebinding linkerd-cni +kubectl get clusterrolebinding linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1205,7 +1205,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create ClusterRoleBindings +kubectl auth can-i create ClusterRoleBindings yes ``` @@ -1222,7 +1222,7 @@ Example error: Ensure that the CNI service account exists in the CNI namespace: ```bash -$ kubectl get ServiceAccount linkerd-cni -n linkerd-cni +kubectl get ServiceAccount linkerd-cni -n linkerd-cni NAME SECRETS AGE linkerd-cni 1 45m ``` @@ -1230,7 +1230,7 @@ linkerd-cni 1 45m Also ensure you have permission to create ServiceAccount: ```bash -$ kubectl auth can-i create ServiceAccounts -n linkerd-cni +kubectl auth can-i create ServiceAccounts -n linkerd-cni yes ``` @@ -1247,7 +1247,7 @@ 
Example error: Ensure that the CNI daemonset exists in the CNI namespace: ```bash -$ kubectl get ds -n linkerd-cni +kubectl get ds -n linkerd-cni NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE linkerd-cni 1 1 1 1 1 beta.kubernetes.io/os=linux 14m ``` @@ -1255,7 +1255,7 @@ linkerd-cni 1 1 1 1 1 beta.kubernet Also ensure you have permission to create DaemonSets: ```bash -$ kubectl auth can-i create DaemonSets -n linkerd-cni +kubectl auth can-i create DaemonSets -n linkerd-cni yes ``` @@ -1272,7 +1272,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -$ kubectl get po -n linkerd-cn +kubectl get po -n linkerd-cn NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s @@ -1282,7 +1282,7 @@ linkerd-cni-p5670 1/1 Running 0 9m25s Ensure that all pods have finished the deployment of the CNI config and binary: ```bash -$ kubectl logs linkerd-cni-rzp2q -n linkerd-cni +kubectl logs linkerd-cni-rzp2q -n linkerd-cni Wrote linkerd CNI binaries to /host/opt/cni/bin Created CNI config /host/etc/cni/net.d/10-kindnet.conflist Done configuring CNI. Sleep=true @@ -1310,7 +1310,7 @@ Make sure multicluster extension is correctly installed and that the `links.multicluster.linkerd.io` CRD is present. ```bash -$ kubectl get crds | grep multicluster +kubectl get crds | grep multicluster NAME CREATED AT links.multicluster.linkerd.io 2021-03-10T09:58:10Z ``` @@ -1400,7 +1400,7 @@ the rules section. 
Expected rules for `linkerd-service-mirror-access-local-resources` cluster role: ```bash -$ kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml +kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml kind: ClusterRole metadata: labels: @@ -1433,7 +1433,7 @@ rules: Expected rules for `linkerd-service-mirror-read-remote-creds` role: ```bash -$ kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml +kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml kind: Role metadata: labels: @@ -1466,7 +1466,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the controller pod with: ```bash -$ kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror +kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror NAME READY STATUS RESTARTS AGE linkerd-service-mirror-7bb8ff5967-zg265 2/2 Running 0 50m ``` @@ -1612,7 +1612,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoles exist: ```bash -$ kubectl get clusterroles | grep linkerd-viz +kubectl get clusterroles | grep linkerd-viz linkerd-linkerd-viz-metrics-api 2021-01-26T18:02:17Z linkerd-linkerd-viz-prometheus 2021-01-26T18:02:17Z linkerd-linkerd-viz-tap 2021-01-26T18:02:17Z @@ -1623,7 +1623,7 @@ linkerd-linkerd-viz-web-check 2021-01-2 Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +kubectl auth can-i create clusterroles yes ``` @@ -1640,7 +1640,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoleBindings exist: ```bash -$ kubectl get clusterrolebindings | grep linkerd-viz +kubectl get clusterrolebindings | grep linkerd-viz linkerd-linkerd-viz-metrics-api ClusterRole/linkerd-linkerd-viz-metrics-api 18h linkerd-linkerd-viz-prometheus 
ClusterRole/linkerd-linkerd-viz-prometheus 18h linkerd-linkerd-viz-tap ClusterRole/linkerd-linkerd-viz-tap 18h @@ -1652,7 +1652,7 @@ linkerd-linkerd-viz-web-check ClusterRole/linkerd-linke Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create clusterrolebindings +kubectl auth can-i create clusterrolebindings yes ``` @@ -1741,7 +1741,7 @@ requirements in the cluster: Ensure all the linkerd-viz pods are injected ```bash -$ kubectl -n linkerd-viz get pods +kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1765,7 +1765,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-viz pods are running with 2/2 ```bash -$ kubectl -n linkerd-viz get pods +kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1936,7 +1936,7 @@ Ensure you can connect to the Linkerd Buoyant version check endpoint from the environment the `linkerd` cli is running: ```bash -$ curl https://buoyant.cloud/version.json +curl https://buoyant.cloud/version.json {"linkerd-buoyant":"v0.4.4"} ``` @@ -2001,7 +2001,7 @@ linkerd-buoyant install | kubectl apply -f - Ensure that the cluster role exists: ```bash -$ kubectl get clusterrole buoyant-cloud-agent +kubectl get clusterrole buoyant-cloud-agent NAME CREATED AT buoyant-cloud-agent 2020-11-13T00:59:50Z ``` @@ -2009,7 +2009,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create ClusterRoles +kubectl auth can-i create ClusterRoles yes ``` @@ -2024,7 +2024,7 @@ yes Ensure that the cluster role binding exists: ```bash -$ kubectl get clusterrolebinding buoyant-cloud-agent +kubectl get clusterrolebinding buoyant-cloud-agent NAME ROLE AGE buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 
301d ``` @@ -2032,7 +2032,7 @@ buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create ClusterRoleBindings +kubectl auth can-i create ClusterRoleBindings yes ``` @@ -2047,7 +2047,7 @@ yes Ensure that the service account exists: ```bash -$ kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent +kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent NAME SECRETS AGE buoyant-cloud-agent 1 301d ``` @@ -2055,7 +2055,7 @@ buoyant-cloud-agent 1 301d Also ensure you have permission to create ServiceAccounts: ```bash -$ kubectl -n buoyant-cloud auth can-i create ServiceAccount +kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2070,7 +2070,7 @@ yes Ensure that the secret exists: ```bash -$ kubectl -n buoyant-cloud get secret buoyant-cloud-id +kubectl -n buoyant-cloud get secret buoyant-cloud-id NAME TYPE DATA AGE buoyant-cloud-id Opaque 4 301d ``` @@ -2078,7 +2078,7 @@ buoyant-cloud-id Opaque 4 301d Also ensure you have permission to create ServiceAccounts: ```bash -$ kubectl -n buoyant-cloud auth can-i create ServiceAccount +kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2116,7 +2116,7 @@ everything to start up. 
If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-agent` Deployment with: ```bash -$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent +kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 156m ``` @@ -2139,7 +2139,7 @@ Ensure the `buoyant-cloud-agent` pod is injected, the `READY` column should show `2/2`: ```bash -$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent +kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 161m ``` @@ -2158,7 +2158,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -$ linkerd-buoyant version +linkerd-buoyant version CLI version: v0.4.4 Agent version: v0.4.4 ``` @@ -2217,7 +2217,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-metrics` DaemonSet with: ```bash -$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics +kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 163m buoyant-cloud-metrics-q8jhj 2/2 Running 0 163m @@ -2243,7 +2243,7 @@ Ensure the `buoyant-cloud-metrics` pods are injected, the `READY` column should show `2/2`: ```bash -$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics +kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 166m buoyant-cloud-metrics-q8jhj 2/2 Running 0 166m @@ -2265,7 +2265,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' +kubectl -n buoyant-cloud get 
daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' {"app.kubernetes.io/name":"metrics","app.kubernetes.io/part-of":"buoyant-cloud","app.kubernetes.io/version":"v0.4.4"} ``` diff --git a/linkerd.io/content/2.10/reference/cli/check.md b/linkerd.io/content/2.10/reference/cli/check.md index 312891f8ef..578e3722d4 100644 --- a/linkerd.io/content/2.10/reference/cli/check.md +++ b/linkerd.io/content/2.10/reference/cli/check.md @@ -12,7 +12,7 @@ for a full list of all the possible checks, what they do and how to fix them. ## Example output ```bash -$ linkerd check +linkerd check kubernetes-api -------------- √ can initialize the client diff --git a/linkerd.io/content/2.10/tasks/getting-per-route-metrics.md b/linkerd.io/content/2.10/tasks/getting-per-route-metrics.md index 424ede9217..7d6120773c 100644 --- a/linkerd.io/content/2.10/tasks/getting-per-route-metrics.md +++ b/linkerd.io/content/2.10/tasks/getting-per-route-metrics.md @@ -14,7 +14,7 @@ For a tutorial that shows this functionality off, check out the You can view per-route metrics in the CLI by running `linkerd viz routes`: ```bash -$ linkerd viz routes svc/webapp +linkerd viz routes svc/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 GET / webapp 100.00% 0.6rps 25ms 30ms 30ms GET /authors/{id} webapp 100.00% 0.6rps 22ms 29ms 30ms @@ -34,7 +34,7 @@ specified in your service profile will end up there. 
It is also possible to look the metrics up by other resource types, such as: ```bash -$ linkerd viz routes deploy/webapp +linkerd viz routes deploy/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 [DEFAULT] kubernetes 0.00% 0.0rps 0ms 0ms 0ms GET / webapp 100.00% 0.5rps 27ms 38ms 40ms @@ -53,7 +53,7 @@ Then, it is possible to filter all the way down to requests going from a specific resource to other services: ```bash -$ linkerd viz routes deploy/webapp --to svc/books +linkerd viz routes deploy/webapp --to svc/books ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 DELETE /books/{id}.json books 100.00% 0.5rps 18ms 29ms 30ms GET /books.json books 100.00% 1.1rps 7ms 12ms 18ms diff --git a/linkerd.io/content/2.10/tasks/securing-your-cluster.md b/linkerd.io/content/2.10/tasks/securing-your-cluster.md index 94d8f7dcc2..6c0efb9462 100644 --- a/linkerd.io/content/2.10/tasks/securing-your-cluster.md +++ b/linkerd.io/content/2.10/tasks/securing-your-cluster.md @@ -54,7 +54,7 @@ kubectl auth can-i watch deployments.tap.linkerd.io -n emojivoto --as $(whoami) You can also use the Linkerd CLI's `--as` flag to confirm: ```bash -$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) Cannot connect to Linkerd Viz: namespaces is forbidden: User "XXXX" cannot list resource "namespaces" in API group "" at the cluster scope Validate the install with: linkerd viz check ... 
@@ -71,7 +71,7 @@ To enable tap access to all resources in all namespaces, you may bind your user to the `linkerd-linkerd-tap-admin` ClusterRole, installed by default: ```bash -$ kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin +kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin Name: linkerd-linkerd-viz-tap-admin Labels: component=tap linkerd.io/extension=viz @@ -103,7 +103,7 @@ kubectl create clusterrolebinding \ You can verify you now have tap access with: ```bash -$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) req id=3:0 proxy=in src=10.244.0.1:37392 dst=10.244.0.13:9996 tls=not_provided_by_remote :method=GET :authority=10.244.0.13:9996 :path=/ping ... ``` @@ -137,14 +137,14 @@ Because GCloud provides this additional level of access, there are cases where not. To validate this, check whether your GCloud user has Tap access: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces yes ``` And then validate whether your RBAC user has Tap access: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) no - no RBAC policy matched ``` @@ -181,14 +181,14 @@ privileges necessary to tap resources. 
To confirm: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web yes ``` This access is enabled via a `linkerd-linkerd-viz-web-admin` ClusterRoleBinding: ```bash -$ kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin +kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin Name: linkerd-linkerd-viz-web-admin Labels: component=web linkerd.io/extensions=viz @@ -221,6 +221,6 @@ kubectl delete clusterrolebindings/linkerd-linkerd-viz-web-admin To confirm: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web no ``` diff --git a/linkerd.io/content/2.10/tasks/troubleshooting.md b/linkerd.io/content/2.10/tasks/troubleshooting.md index ee27242c04..19d5ec5ccb 100644 --- a/linkerd.io/content/2.10/tasks/troubleshooting.md +++ b/linkerd.io/content/2.10/tasks/troubleshooting.md @@ -314,7 +314,7 @@ Example failure: Ensure the Linkerd ClusterRoles exist: ```bash -$ kubectl get clusterroles | grep linkerd +kubectl get clusterroles | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -323,7 +323,7 @@ linkerd-linkerd-proxy-injector 9d Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +kubectl auth can-i create clusterroles yes ``` @@ -340,7 +340,7 @@ Example failure: Ensure the Linkerd ClusterRoleBindings exist: ```bash -$ kubectl get clusterrolebindings | grep linkerd +kubectl get clusterrolebindings | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -349,7 +349,7 @@ linkerd-linkerd-proxy-injector 9d Also ensure you have permission to create 
ClusterRoleBindings: ```bash -$ kubectl auth can-i create clusterrolebindings +kubectl auth can-i create clusterrolebindings yes ``` @@ -366,7 +366,7 @@ Example failure: Ensure the Linkerd ServiceAccounts exist: ```bash -$ kubectl -n linkerd get serviceaccounts +kubectl -n linkerd get serviceaccounts NAME SECRETS AGE default 1 14m linkerd-destination 1 14m @@ -379,7 +379,7 @@ Also ensure you have permission to create ServiceAccounts in the Linkerd namespace: ```bash -$ kubectl -n linkerd auth can-i create serviceaccounts +kubectl -n linkerd auth can-i create serviceaccounts yes ``` @@ -396,7 +396,7 @@ Example failure: Ensure the Linkerd CRD exists: ```bash -$ kubectl get customresourcedefinitions +kubectl get customresourcedefinitions NAME CREATED AT serviceprofiles.linkerd.io 2019-04-25T21:47:31Z ``` @@ -404,7 +404,7 @@ serviceprofiles.linkerd.io 2019-04-25T21:47:31Z Also ensure you have permission to create CRDs: ```bash -$ kubectl auth can-i create customresourcedefinitions +kubectl auth can-i create customresourcedefinitions yes ``` @@ -421,14 +421,14 @@ Example failure: Ensure the Linkerd MutatingWebhookConfigurations exists: ```bash -$ kubectl get mutatingwebhookconfigurations | grep linkerd +kubectl get mutatingwebhookconfigurations | grep linkerd linkerd-proxy-injector-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create MutatingWebhookConfigurations: ```bash -$ kubectl auth can-i create mutatingwebhookconfigurations +kubectl auth can-i create mutatingwebhookconfigurations yes ``` @@ -445,14 +445,14 @@ Example failure: Ensure the Linkerd ValidatingWebhookConfiguration exists: ```bash -$ kubectl get validatingwebhookconfigurations | grep linkerd +kubectl get validatingwebhookconfigurations | grep linkerd linkerd-sp-validator-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create ValidatingWebhookConfigurations: ```bash -$ kubectl auth can-i create validatingwebhookconfigurations +kubectl auth can-i 
create validatingwebhookconfigurations yes ``` @@ -469,14 +469,14 @@ Example failure: Ensure the Linkerd PodSecurityPolicy exists: ```bash -$ kubectl get podsecuritypolicies | grep linkerd +kubectl get podsecuritypolicies | grep linkerd linkerd-linkerd-control-plane false NET_ADMIN,NET_RAW RunAsAny RunAsAny MustRunAs MustRunAs true configMap,emptyDir,secret,projected,downwardAPI,persistentVolumeClaim ``` Also ensure you have permission to create PodSecurityPolicies: ```bash -$ kubectl auth can-i create podsecuritypolicies +kubectl auth can-i create podsecuritypolicies yes ``` @@ -495,7 +495,7 @@ Example failure: Ensure the Linkerd ConfigMap exists: ```bash -$ kubectl -n linkerd get configmap/linkerd-config +kubectl -n linkerd get configmap/linkerd-config NAME DATA AGE linkerd-config 3 61m ``` @@ -503,7 +503,7 @@ linkerd-config 3 61m Also ensure you have permission to create ConfigMaps: ```bash -$ kubectl -n linkerd auth can-i create configmap +kubectl -n linkerd auth can-i create configmap yes ``` @@ -820,7 +820,7 @@ Example failure: Verify the state of the control plane pods with: ```bash -$ kubectl -n linkerd get po +kubectl -n linkerd get po NAME READY STATUS RESTARTS AGE linkerd-destination-5fd7b5d466-szgqm 2/2 Running 1 12m linkerd-identity-54df78c479-hbh5m 2/2 Running 0 12m @@ -883,7 +883,7 @@ Ensure you can connect to the Linkerd version check endpoint from the environment the `linkerd` cli is running: ```bash -$ curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" +curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" {"stable":"stable-2.1.0","edge":"edge-19.1.2"} ``` @@ -922,7 +922,7 @@ normally. Example failure: ```bash -$ linkerd check --proxy --namespace foo +linkerd check --proxy --namespace foo ... 
× data plane namespace exists The "foo" namespace does not exist @@ -1051,7 +1051,7 @@ Ensure the kube-system namespace has the `config.linkerd.io/admission-webhooks:disabled` label: ```bash -$ kubectl get namespace kube-system -oyaml +kubectl get namespace kube-system -oyaml kind: Namespace apiVersion: v1 metadata: @@ -1124,7 +1124,7 @@ Example error: Ensure that the linkerd-cni-config ConfigMap exists in the CNI namespace: ```bash -$ kubectl get cm linkerd-cni-config -n linkerd-cni +kubectl get cm linkerd-cni-config -n linkerd-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1132,7 +1132,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create ConfigMaps: ```bash -$ kubectl auth can-i create ConfigMaps +kubectl auth can-i create ConfigMaps yes ``` @@ -1149,7 +1149,7 @@ Example error: Ensure that the pod security policy exists: ```bash -$ kubectl get psp linkerd-linkerd-cni-cni +kubectl get psp linkerd-linkerd-cni-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1157,7 +1157,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create PodSecurityPolicies: ```bash -$ kubectl auth can-i create PodSecurityPolicies +kubectl auth can-i create PodSecurityPolicies yes ``` @@ -1174,7 +1174,7 @@ Example error: Ensure that the cluster role exists: ```bash -$ kubectl get clusterrole linkerd-cni +kubectl get clusterrole linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1182,7 +1182,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create ClusterRoles +kubectl auth can-i create ClusterRoles yes ``` @@ -1199,7 +1199,7 @@ Example error: Ensure that the cluster role binding exists: 
```bash -$ kubectl get clusterrolebinding linkerd-cni +kubectl get clusterrolebinding linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1207,7 +1207,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create ClusterRoleBindings +kubectl auth can-i create ClusterRoleBindings yes ``` @@ -1224,7 +1224,7 @@ Example error: Ensure that the role exists in the CNI namespace: ```bash -$ kubectl get role linkerd-cni -n linkerd-cni +kubectl get role linkerd-cni -n linkerd-cni NAME AGE linkerd-cni 52m ``` @@ -1232,7 +1232,7 @@ linkerd-cni 52m Also ensure you have permission to create Roles: ```bash -$ kubectl auth can-i create Roles -n linkerd-cni +kubectl auth can-i create Roles -n linkerd-cni yes ``` @@ -1249,7 +1249,7 @@ Example error: Ensure that the role binding exists in the CNI namespace: ```bash -$ kubectl get rolebinding linkerd-cni -n linkerd-cni +kubectl get rolebinding linkerd-cni -n linkerd-cni NAME AGE linkerd-cni 49m ``` @@ -1257,7 +1257,7 @@ linkerd-cni 49m Also ensure you have permission to create RoleBindings: ```bash -$ kubectl auth can-i create RoleBindings -n linkerd-cni +kubectl auth can-i create RoleBindings -n linkerd-cni yes ``` @@ -1274,7 +1274,7 @@ Example error: Ensure that the CNI service account exists in the CNI namespace: ```bash -$ kubectl get ServiceAccount linkerd-cni -n linkerd-cni +kubectl get ServiceAccount linkerd-cni -n linkerd-cni NAME SECRETS AGE linkerd-cni 1 45m ``` @@ -1282,7 +1282,7 @@ linkerd-cni 1 45m Also ensure you have permission to create ServiceAccount: ```bash -$ kubectl auth can-i create ServiceAccounts -n linkerd-cni +kubectl auth can-i create ServiceAccounts -n linkerd-cni yes ``` @@ -1299,7 +1299,7 @@ Example error: Ensure that the CNI daemonset exists in the CNI namespace: ```bash -$ kubectl get ds -n linkerd-cni +kubectl get ds -n linkerd-cni NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE linkerd-cni 1 1 1 1 1 beta.kubernetes.io/os=linux 14m 
``` @@ -1307,7 +1307,7 @@ linkerd-cni 1 1 1 1 1 beta.kubernet Also ensure you have permission to create DaemonSets: ```bash -$ kubectl auth can-i create DaemonSets -n linkerd-cni +kubectl auth can-i create DaemonSets -n linkerd-cni yes ``` @@ -1324,7 +1324,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -$ kubectl get po -n linkerd-cn +kubectl get po -n linkerd-cn NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s @@ -1334,7 +1334,7 @@ linkerd-cni-p5670 1/1 Running 0 9m25s Ensure that all pods have finished the deployment of the CNI config and binary: ```bash -$ kubectl logs linkerd-cni-rzp2q -n linkerd-cni +kubectl logs linkerd-cni-rzp2q -n linkerd-cni Wrote linkerd CNI binaries to /host/opt/cni/bin Created CNI config /host/etc/cni/net.d/10-kindnet.conflist Done configuring CNI. Sleep=true @@ -1362,7 +1362,7 @@ Make sure multicluster extension is correctly installed and that the `links.multicluster.linkerd.io` CRD is present. ```bash -$ kubectl get crds | grep multicluster +kubectl get crds | grep multicluster NAME CREATED AT links.multicluster.linkerd.io 2021-03-10T09:58:10Z ``` @@ -1441,7 +1441,7 @@ the rules section. Expected rules for `linkerd-service-mirror-access-local-resources` cluster role: ```bash -$ kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml +kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml kind: ClusterRole metadata: labels: @@ -1474,7 +1474,7 @@ rules: Expected rules for `linkerd-service-mirror-read-remote-creds` role: ```bash -$ kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml +kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml kind: Role metadata: labels: @@ -1507,7 +1507,7 @@ everything to start up. 
If this is a permanent error, you'll want to validate the state of the controller pod with: ```bash -$ kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror +kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror NAME READY STATUS RESTARTS AGE linkerd-service-mirror-7bb8ff5967-zg265 2/2 Running 0 50m ``` @@ -1606,7 +1606,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoles exist: ```bash -$ kubectl get clusterroles | grep linkerd-viz +kubectl get clusterroles | grep linkerd-viz linkerd-linkerd-viz-metrics-api 2021-01-26T18:02:17Z linkerd-linkerd-viz-prometheus 2021-01-26T18:02:17Z linkerd-linkerd-viz-tap 2021-01-26T18:02:17Z @@ -1617,7 +1617,7 @@ linkerd-linkerd-viz-web-check 2021-01-2 Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +kubectl auth can-i create clusterroles yes ``` @@ -1634,7 +1634,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoleBindings exist: ```bash -$ kubectl get clusterrolebindings | grep linkerd-viz +kubectl get clusterrolebindings | grep linkerd-viz linkerd-linkerd-viz-metrics-api ClusterRole/linkerd-linkerd-viz-metrics-api 18h linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 18h linkerd-linkerd-viz-tap ClusterRole/linkerd-linkerd-viz-tap 18h @@ -1646,7 +1646,7 @@ linkerd-linkerd-viz-web-check ClusterRole/linkerd-linke Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create clusterrolebindings +kubectl auth can-i create clusterrolebindings yes ``` @@ -1718,7 +1718,7 @@ requirements in the cluster: Ensure all the linkerd-viz pods are injected ```bash -$ kubectl -n linkerd-viz get pods +kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1742,7 +1742,7 @@ Make sure that the `proxy-injector` is 
working correctly by running Ensure all the linkerd-viz pods are running with 2/2 ```bash -$ kubectl -n linkerd-viz get pods +kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1847,7 +1847,7 @@ Example failure: Ensure the linkerd-jaeger ServiceAccounts exist: ```bash -$ kubectl -n linkerd-jaeger get serviceaccounts +kubectl -n linkerd-jaeger get serviceaccounts NAME SECRETS AGE collector 1 23m jaeger 1 23m @@ -1857,7 +1857,7 @@ Also ensure you have permission to create ServiceAccounts in the linkerd-jaeger namespace: ```bash -$ kubectl -n linkerd-jaeger auth can-i create serviceaccounts +kubectl -n linkerd-jaeger auth can-i create serviceaccounts yes ``` @@ -1874,7 +1874,7 @@ Example failure: Ensure the Linkerd ConfigMap exists: ```bash -$ kubectl -n linkerd-jaeger get configmap/collector-config +kubectl -n linkerd-jaeger get configmap/collector-config NAME DATA AGE collector-config 1 61m ``` @@ -1882,7 +1882,7 @@ collector-config 1 61m Also ensure you have permission to create ConfigMaps: ```bash -$ kubectl -n linkerd-jaeger auth can-i create configmap +kubectl -n linkerd-jaeger auth can-i create configmap yes ``` @@ -1897,7 +1897,7 @@ yes Ensure all the jaeger pods are injected ```bash -$ kubectl -n linkerd-jaeger get pods +kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE collector-69cc44dfbc-rhpfg 2/2 Running 0 11s jaeger-6f98d5c979-scqlq 2/2 Running 0 11s @@ -1918,7 +1918,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-jaeger pods are running with 2/2 ```bash -$ kubectl -n linkerd-jaeger get pods +kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE jaeger-injector-548684d74b-bcq5h 2/2 Running 0 5s collector-69cc44dfbc-wqf6s 2/2 Running 0 5s @@ -1967,7 +1967,7 @@ Ensure you can connect to the Linkerd Buoyant version check endpoint from the environment the `linkerd` cli is 
running: ```bash -$ curl --proto '=https' --tlsv1.2 -sSfL https://buoyant.cloud/version.json +curl --proto '=https' --tlsv1.2 -sSfL https://buoyant.cloud/version.json {"linkerd-buoyant":"v0.4.4"} ``` @@ -2032,7 +2032,7 @@ linkerd-buoyant install | kubectl apply -f - Ensure that the cluster role exists: ```bash -$ kubectl get clusterrole buoyant-cloud-agent +kubectl get clusterrole buoyant-cloud-agent NAME CREATED AT buoyant-cloud-agent 2020-11-13T00:59:50Z ``` @@ -2040,7 +2040,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create ClusterRoles +kubectl auth can-i create ClusterRoles yes ``` @@ -2055,7 +2055,7 @@ yes Ensure that the cluster role binding exists: ```bash -$ kubectl get clusterrolebinding buoyant-cloud-agent +kubectl get clusterrolebinding buoyant-cloud-agent NAME ROLE AGE buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d ``` @@ -2063,7 +2063,7 @@ buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create ClusterRoleBindings +kubectl auth can-i create ClusterRoleBindings yes ``` @@ -2078,7 +2078,7 @@ yes Ensure that the service account exists: ```bash -$ kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent +kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent NAME SECRETS AGE buoyant-cloud-agent 1 301d ``` @@ -2086,7 +2086,7 @@ buoyant-cloud-agent 1 301d Also ensure you have permission to create ServiceAccounts: ```bash -$ kubectl -n buoyant-cloud auth can-i create ServiceAccount +kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2101,7 +2101,7 @@ yes Ensure that the secret exists: ```bash -$ kubectl -n buoyant-cloud get secret buoyant-cloud-id +kubectl -n buoyant-cloud get secret buoyant-cloud-id NAME TYPE DATA AGE buoyant-cloud-id Opaque 4 301d ``` @@ -2109,7 +2109,7 @@ buoyant-cloud-id Opaque 4 301d Also ensure you have 
permission to create ServiceAccounts: ```bash -$ kubectl -n buoyant-cloud auth can-i create ServiceAccount +kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2147,7 +2147,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-agent` Deployment with: ```bash -$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent +kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 156m ``` @@ -2170,7 +2170,7 @@ Ensure the `buoyant-cloud-agent` pod is injected, the `READY` column should show `2/2`: ```bash -$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent +kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 161m ``` @@ -2189,7 +2189,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -$ linkerd-buoyant version +linkerd-buoyant version CLI version: v0.4.4 Agent version: v0.4.4 ``` @@ -2248,7 +2248,7 @@ everything to start up. 
If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-metrics` DaemonSet with: ```bash -$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics +kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 163m buoyant-cloud-metrics-q8jhj 2/2 Running 0 163m @@ -2274,7 +2274,7 @@ Ensure the `buoyant-cloud-metrics` pods are injected, the `READY` column should show `2/2`: ```bash -$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics +kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 166m buoyant-cloud-metrics-q8jhj 2/2 Running 0 166m @@ -2296,7 +2296,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' +kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' {"app.kubernetes.io/name":"metrics","app.kubernetes.io/part-of":"buoyant-cloud","app.kubernetes.io/version":"v0.4.4"} ``` diff --git a/linkerd.io/content/2.11/reference/cli/check.md b/linkerd.io/content/2.11/reference/cli/check.md index 312891f8ef..578e3722d4 100644 --- a/linkerd.io/content/2.11/reference/cli/check.md +++ b/linkerd.io/content/2.11/reference/cli/check.md @@ -12,7 +12,7 @@ for a full list of all the possible checks, what they do and how to fix them. 
## Example output ```bash -$ linkerd check +linkerd check kubernetes-api -------------- √ can initialize the client diff --git a/linkerd.io/content/2.11/reference/iptables.md b/linkerd.io/content/2.11/reference/iptables.md index 67a7ea89de..9b4d229a59 100644 --- a/linkerd.io/content/2.11/reference/iptables.md +++ b/linkerd.io/content/2.11/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -$ kubectl -n logs linkerd-init +kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2.11/tasks/getting-per-route-metrics.md b/linkerd.io/content/2.11/tasks/getting-per-route-metrics.md index 424ede9217..7d6120773c 100644 --- a/linkerd.io/content/2.11/tasks/getting-per-route-metrics.md +++ b/linkerd.io/content/2.11/tasks/getting-per-route-metrics.md @@ -14,7 +14,7 @@ For a tutorial that shows this functionality off, check out the You can view per-route metrics in the CLI by running `linkerd viz routes`: ```bash -$ linkerd viz routes svc/webapp +linkerd viz routes svc/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 GET / webapp 100.00% 0.6rps 25ms 30ms 30ms GET /authors/{id} webapp 100.00% 0.6rps 22ms 29ms 30ms @@ -34,7 +34,7 @@ specified in your service profile will end up there. 
It is also possible to look the metrics up by other resource types, such as: ```bash -$ linkerd viz routes deploy/webapp +linkerd viz routes deploy/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 [DEFAULT] kubernetes 0.00% 0.0rps 0ms 0ms 0ms GET / webapp 100.00% 0.5rps 27ms 38ms 40ms @@ -53,7 +53,7 @@ Then, it is possible to filter all the way down to requests going from a specific resource to other services: ```bash -$ linkerd viz routes deploy/webapp --to svc/books +linkerd viz routes deploy/webapp --to svc/books ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 DELETE /books/{id}.json books 100.00% 0.5rps 18ms 29ms 30ms GET /books.json books 100.00% 1.1rps 7ms 12ms 18ms diff --git a/linkerd.io/content/2.11/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.11/tasks/multicluster-using-statefulsets.md index 9d8730b5b0..c720c09563 100644 --- a/linkerd.io/content/2.11/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.11/tasks/multicluster-using-statefulsets.md @@ -48,8 +48,8 @@ The first step is to clone the demo repository on your local machine. ```sh # clone example repository -$ git clone git@github.com:mateiidavid/l2d-k3d-statefulset.git -$ cd l2d-k3d-statefulset +git clone git@github.com:mateiidavid/l2d-k3d-statefulset.git +cd l2d-k3d-statefulset ``` The second step consists of creating two `k3d` clusters named `east` and `west`, @@ -60,10 +60,10 @@ everything. ```sh # create k3d clusters -$ ./create.sh +./create.sh # list the clusters -$ k3d cluster list +k3d cluster list NAME SERVERS AGENTS LOADBALANCER east 1/1 0/0 true west 1/1 0/0 true @@ -78,10 +78,10 @@ provided scripts, but feel free to have a look! ```sh # Install Linkerd and multicluster, output to check should be a success -$ ./install.sh +./install.sh # Next, link the two clusters together -$ ./link.sh +./link.sh ``` Perfect! If you've made it this far with no errors, then it's a good sign. In @@ -101,17 +101,17 @@ communication. 
First, we will deploy our pods and services: ```sh # deploy services and mesh namespaces -$ ./deploy.sh +./deploy.sh # verify both clusters # # verify east -$ kubectl --context=k3d-east get pods +kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 7s # verify west has headless service -$ kubectl --context=k3d-west get services +kubectl --context=k3d-west get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 10m nginx-svc ClusterIP None 80/TCP 8s @@ -119,7 +119,7 @@ nginx-svc ClusterIP None 80/TCP 8s # verify west has statefulset # # this may take a while to come up -$ kubectl --context=k3d-west get pods +kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 53s nginx-set-1 2/2 Running 0 43s @@ -130,7 +130,7 @@ Before we go further, let's have a look at the endpoints object for the `nginx-svc`: ```sh -$ kubectl --context=k3d-west get endpoints nginx-svc -o yaml +kubectl --context=k3d-west get endpoints nginx-svc -o yaml ... subsets: - addresses: @@ -170,23 +170,23 @@ would get an answer back. We can test this out by applying the curl pod to the `west` cluster: ```sh -$ kubectl --context=k3d-west apply -f east/curl.yml -$ kubectl --context=k3d-west get pods +kubectl --context=k3d-west apply -f east/curl.yml +kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 5m8s nginx-set-1 2/2 Running 0 4m58s nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s -$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh -/$ # prompt for curl pod +kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh +/# prompt for curl pod ``` If we now curl one of these instances, we will get back a response. 
```sh # exec'd on the pod -/ $ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local +/ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local " @@ -218,10 +218,10 @@ Now, let's do the same, but this time from the `east` cluster. We will first export the service. ```sh -$ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" +kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled -$ kubectl --context=k3d-east get services +kubectl --context=k3d-east get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 20h nginx-svc-west ClusterIP None 80/TCP 29s @@ -235,7 +235,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname will point to one of the services we see above: ```sh -$ kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml +kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml subsets: - addresses: - hostname: nginx-set-0 @@ -251,17 +251,17 @@ cluster (`west`), will be mirrored as a clusterIP service. We will see in a second why this matters. ```sh -$ kubectl --context=k3d-east get pods +kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -$ kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh +kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. # -/ $ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local +/ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local @@ -329,8 +329,8 @@ validation. 
To clean-up, you can remove both clusters entirely using the k3d CLI: ```sh -$ k3d cluster delete east +k3d cluster delete east cluster east deleted -$ k3d cluster delete west +k3d cluster delete west cluster west deleted ``` diff --git a/linkerd.io/content/2.11/tasks/restricting-access.md b/linkerd.io/content/2.11/tasks/restricting-access.md index cb4db5c857..7f79a0478b 100644 --- a/linkerd.io/content/2.11/tasks/restricting-access.md +++ b/linkerd.io/content/2.11/tasks/restricting-access.md @@ -16,27 +16,27 @@ Ensure that you have Linkerd version stable-2.11.0 or later installed, and that it is healthy: ```bash -$ linkerd install | kubectl apply -f - +linkerd install | kubectl apply -f - ... -$ linkerd check -o short +linkerd check -o short ... ``` Inject and install the Emojivoto application: ```bash -$ linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - +linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - ... -$ linkerd check -n emojivoto --proxy -o short +linkerd check -n emojivoto --proxy -o short ... ``` In order to observe what's going on, we'll also install the Viz extension: ```bash -$ linkerd viz install | kubectl apply -f - +linkerd viz install | kubectl apply -f - ... -$ linkerd viz check +linkerd viz check ... 
``` diff --git a/linkerd.io/content/2.11/tasks/securing-your-cluster.md b/linkerd.io/content/2.11/tasks/securing-your-cluster.md index 94d8f7dcc2..6c0efb9462 100644 --- a/linkerd.io/content/2.11/tasks/securing-your-cluster.md +++ b/linkerd.io/content/2.11/tasks/securing-your-cluster.md @@ -54,7 +54,7 @@ kubectl auth can-i watch deployments.tap.linkerd.io -n emojivoto --as $(whoami) You can also use the Linkerd CLI's `--as` flag to confirm: ```bash -$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) Cannot connect to Linkerd Viz: namespaces is forbidden: User "XXXX" cannot list resource "namespaces" in API group "" at the cluster scope Validate the install with: linkerd viz check ... @@ -71,7 +71,7 @@ To enable tap access to all resources in all namespaces, you may bind your user to the `linkerd-linkerd-tap-admin` ClusterRole, installed by default: ```bash -$ kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin +kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin Name: linkerd-linkerd-viz-tap-admin Labels: component=tap linkerd.io/extension=viz @@ -103,7 +103,7 @@ kubectl create clusterrolebinding \ You can verify you now have tap access with: ```bash -$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) req id=3:0 proxy=in src=10.244.0.1:37392 dst=10.244.0.13:9996 tls=not_provided_by_remote :method=GET :authority=10.244.0.13:9996 :path=/ping ... ``` @@ -137,14 +137,14 @@ Because GCloud provides this additional level of access, there are cases where not. 
To validate this, check whether your GCloud user has Tap access: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces yes ``` And then validate whether your RBAC user has Tap access: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) no - no RBAC policy matched ``` @@ -181,14 +181,14 @@ privileges necessary to tap resources. To confirm: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web yes ``` This access is enabled via a `linkerd-linkerd-viz-web-admin` ClusterRoleBinding: ```bash -$ kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin +kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin Name: linkerd-linkerd-viz-web-admin Labels: component=web linkerd.io/extensions=viz @@ -221,6 +221,6 @@ kubectl delete clusterrolebindings/linkerd-linkerd-viz-web-admin To confirm: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web no ``` diff --git a/linkerd.io/content/2.11/tasks/troubleshooting.md b/linkerd.io/content/2.11/tasks/troubleshooting.md index 3a988974a6..78aa233661 100644 --- a/linkerd.io/content/2.11/tasks/troubleshooting.md +++ b/linkerd.io/content/2.11/tasks/troubleshooting.md @@ -314,7 +314,7 @@ Example failure: Ensure the Linkerd ClusterRoles exist: ```bash -$ kubectl get clusterroles | grep linkerd +kubectl get clusterroles | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -324,7 +324,7 @@ 
linkerd-policy 9d Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +kubectl auth can-i create clusterroles yes ``` @@ -341,7 +341,7 @@ Example failure: Ensure the Linkerd ClusterRoleBindings exist: ```bash -$ kubectl get clusterrolebindings | grep linkerd +kubectl get clusterrolebindings | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -351,7 +351,7 @@ linkerd-destination-policy 9d Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create clusterrolebindings +kubectl auth can-i create clusterrolebindings yes ``` @@ -368,7 +368,7 @@ Example failure: Ensure the Linkerd ServiceAccounts exist: ```bash -$ kubectl -n linkerd get serviceaccounts +kubectl -n linkerd get serviceaccounts NAME SECRETS AGE default 1 14m linkerd-destination 1 14m @@ -381,7 +381,7 @@ Also ensure you have permission to create ServiceAccounts in the Linkerd namespace: ```bash -$ kubectl -n linkerd auth can-i create serviceaccounts +kubectl -n linkerd auth can-i create serviceaccounts yes ``` @@ -398,7 +398,7 @@ Example failure: Ensure the Linkerd CRD exists: ```bash -$ kubectl get customresourcedefinitions +kubectl get customresourcedefinitions NAME CREATED AT serviceprofiles.linkerd.io 2019-04-25T21:47:31Z ``` @@ -406,7 +406,7 @@ serviceprofiles.linkerd.io 2019-04-25T21:47:31Z Also ensure you have permission to create CRDs: ```bash -$ kubectl auth can-i create customresourcedefinitions +kubectl auth can-i create customresourcedefinitions yes ``` @@ -423,14 +423,14 @@ Example failure: Ensure the Linkerd MutatingWebhookConfigurations exists: ```bash -$ kubectl get mutatingwebhookconfigurations | grep linkerd +kubectl get mutatingwebhookconfigurations | grep linkerd linkerd-proxy-injector-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create MutatingWebhookConfigurations: ```bash -$ kubectl auth can-i create 
mutatingwebhookconfigurations +kubectl auth can-i create mutatingwebhookconfigurations yes ``` @@ -447,14 +447,14 @@ Example failure: Ensure the Linkerd ValidatingWebhookConfiguration exists: ```bash -$ kubectl get validatingwebhookconfigurations | grep linkerd +kubectl get validatingwebhookconfigurations | grep linkerd linkerd-sp-validator-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create ValidatingWebhookConfigurations: ```bash -$ kubectl auth can-i create validatingwebhookconfigurations +kubectl auth can-i create validatingwebhookconfigurations yes ``` @@ -471,14 +471,14 @@ Example failure: Ensure the Linkerd PodSecurityPolicy exists: ```bash -$ kubectl get podsecuritypolicies | grep linkerd +kubectl get podsecuritypolicies | grep linkerd linkerd-linkerd-control-plane false NET_ADMIN,NET_RAW RunAsAny RunAsAny MustRunAs MustRunAs true configMap,emptyDir,secret,projected,downwardAPI,persistentVolumeClaim ``` Also ensure you have permission to create PodSecurityPolicies: ```bash -$ kubectl auth can-i create podsecuritypolicies +kubectl auth can-i create podsecuritypolicies yes ``` @@ -526,7 +526,7 @@ Example failure: Ensure the Linkerd ConfigMap exists: ```bash -$ kubectl -n linkerd get configmap/linkerd-config +kubectl -n linkerd get configmap/linkerd-config NAME DATA AGE linkerd-config 3 61m ``` @@ -534,7 +534,7 @@ linkerd-config 3 61m Also ensure you have permission to create ConfigMaps: ```bash -$ kubectl -n linkerd auth can-i create configmap +kubectl -n linkerd auth can-i create configmap yes ``` @@ -888,7 +888,7 @@ Example failure: Verify the state of the control plane pods with: ```bash -$ kubectl -n linkerd get po +kubectl -n linkerd get po NAME READY STATUS RESTARTS AGE linkerd-destination-5fd7b5d466-szgqm 2/2 Running 1 12m linkerd-identity-54df78c479-hbh5m 2/2 Running 0 12m @@ -990,7 +990,7 @@ Ensure you can connect to the Linkerd version check endpoint from the environment the `linkerd` cli is running: ```bash -$ curl 
"https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" +curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" {"stable":"stable-2.1.0","edge":"edge-19.1.2"} ``` @@ -1049,7 +1049,7 @@ normally. Example failure: ```bash -$ linkerd check --proxy --namespace foo +linkerd check --proxy --namespace foo ... × data plane namespace exists The "foo" namespace does not exist @@ -1198,7 +1198,7 @@ Ensure the kube-system namespace has the `config.linkerd.io/admission-webhooks:disabled` label: ```bash -$ kubectl get namespace kube-system -oyaml +kubectl get namespace kube-system -oyaml kind: Namespace apiVersion: v1 metadata: @@ -1271,7 +1271,7 @@ Example error: Ensure that the linkerd-cni-config ConfigMap exists in the CNI namespace: ```bash -$ kubectl get cm linkerd-cni-config -n linkerd-cni +kubectl get cm linkerd-cni-config -n linkerd-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1279,7 +1279,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create ConfigMaps: ```bash -$ kubectl auth can-i create ConfigMaps +kubectl auth can-i create ConfigMaps yes ``` @@ -1296,7 +1296,7 @@ Example error: Ensure that the pod security policy exists: ```bash -$ kubectl get psp linkerd-linkerd-cni-cni +kubectl get psp linkerd-linkerd-cni-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1304,7 +1304,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create PodSecurityPolicies: ```bash -$ kubectl auth can-i create PodSecurityPolicies +kubectl auth can-i create PodSecurityPolicies yes ``` @@ -1321,7 +1321,7 @@ Example error: Ensure that the cluster role exists: 
```bash -$ kubectl get clusterrole linkerd-cni +kubectl get clusterrole linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1329,7 +1329,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create ClusterRoles +kubectl auth can-i create ClusterRoles yes ``` @@ -1346,7 +1346,7 @@ Example error: Ensure that the cluster role binding exists: ```bash -$ kubectl get clusterrolebinding linkerd-cni +kubectl get clusterrolebinding linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1354,7 +1354,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create ClusterRoleBindings +kubectl auth can-i create ClusterRoleBindings yes ``` @@ -1371,7 +1371,7 @@ Example error: Ensure that the role exists in the CNI namespace: ```bash -$ kubectl get role linkerd-cni -n linkerd-cni +kubectl get role linkerd-cni -n linkerd-cni NAME AGE linkerd-cni 52m ``` @@ -1379,7 +1379,7 @@ linkerd-cni 52m Also ensure you have permission to create Roles: ```bash -$ kubectl auth can-i create Roles -n linkerd-cni +kubectl auth can-i create Roles -n linkerd-cni yes ``` @@ -1396,7 +1396,7 @@ Example error: Ensure that the role binding exists in the CNI namespace: ```bash -$ kubectl get rolebinding linkerd-cni -n linkerd-cni +kubectl get rolebinding linkerd-cni -n linkerd-cni NAME AGE linkerd-cni 49m ``` @@ -1404,7 +1404,7 @@ linkerd-cni 49m Also ensure you have permission to create RoleBindings: ```bash -$ kubectl auth can-i create RoleBindings -n linkerd-cni +kubectl auth can-i create RoleBindings -n linkerd-cni yes ``` @@ -1421,7 +1421,7 @@ Example error: Ensure that the CNI service account exists in the CNI namespace: ```bash -$ kubectl get ServiceAccount linkerd-cni -n linkerd-cni +kubectl get ServiceAccount linkerd-cni -n linkerd-cni NAME SECRETS AGE linkerd-cni 1 45m ``` @@ -1429,7 +1429,7 @@ linkerd-cni 1 45m Also ensure you have permission to create ServiceAccount: ```bash -$ kubectl auth can-i create 
ServiceAccounts -n linkerd-cni +kubectl auth can-i create ServiceAccounts -n linkerd-cni yes ``` @@ -1446,7 +1446,7 @@ Example error: Ensure that the CNI daemonset exists in the CNI namespace: ```bash -$ kubectl get ds -n linkerd-cni +kubectl get ds -n linkerd-cni NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE linkerd-cni 1 1 1 1 1 beta.kubernetes.io/os=linux 14m ``` @@ -1454,7 +1454,7 @@ linkerd-cni 1 1 1 1 1 beta.kubernet Also ensure you have permission to create DaemonSets: ```bash -$ kubectl auth can-i create DaemonSets -n linkerd-cni +kubectl auth can-i create DaemonSets -n linkerd-cni yes ``` @@ -1471,7 +1471,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -$ kubectl get po -n linkerd-cn +kubectl get po -n linkerd-cn NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s @@ -1481,7 +1481,7 @@ linkerd-cni-p5670 1/1 Running 0 9m25s Ensure that all pods have finished the deployment of the CNI config and binary: ```bash -$ kubectl logs linkerd-cni-rzp2q -n linkerd-cni +kubectl logs linkerd-cni-rzp2q -n linkerd-cni Wrote linkerd CNI binaries to /host/opt/cni/bin Created CNI config /host/etc/cni/net.d/10-kindnet.conflist Done configuring CNI. Sleep=true @@ -1509,7 +1509,7 @@ Make sure multicluster extension is correctly installed and that the `links.multicluster.linkerd.io` CRD is present. ```bash -$ kubectl get crds | grep multicluster +kubectl get crds | grep multicluster NAME CREATED AT links.multicluster.linkerd.io 2021-03-10T09:58:10Z ``` @@ -1588,7 +1588,7 @@ the rules section. 
Expected rules for `linkerd-service-mirror-access-local-resources` cluster role: ```bash -$ kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml +kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml kind: ClusterRole metadata: labels: @@ -1621,7 +1621,7 @@ rules: Expected rules for `linkerd-service-mirror-read-remote-creds` role: ```bash -$ kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml +kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml kind: Role metadata: labels: @@ -1654,7 +1654,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the controller pod with: ```bash -$ kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror +kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror NAME READY STATUS RESTARTS AGE linkerd-service-mirror-7bb8ff5967-zg265 2/2 Running 0 50m ``` @@ -1753,7 +1753,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoles exist: ```bash -$ kubectl get clusterroles | grep linkerd-viz +kubectl get clusterroles | grep linkerd-viz linkerd-linkerd-viz-metrics-api 2021-01-26T18:02:17Z linkerd-linkerd-viz-prometheus 2021-01-26T18:02:17Z linkerd-linkerd-viz-tap 2021-01-26T18:02:17Z @@ -1764,7 +1764,7 @@ linkerd-linkerd-viz-web-check 2021-01-2 Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +kubectl auth can-i create clusterroles yes ``` @@ -1781,7 +1781,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoleBindings exist: ```bash -$ kubectl get clusterrolebindings | grep linkerd-viz +kubectl get clusterrolebindings | grep linkerd-viz linkerd-linkerd-viz-metrics-api ClusterRole/linkerd-linkerd-viz-metrics-api 18h linkerd-linkerd-viz-prometheus 
ClusterRole/linkerd-linkerd-viz-prometheus 18h linkerd-linkerd-viz-tap ClusterRole/linkerd-linkerd-viz-tap 18h @@ -1793,7 +1793,7 @@ linkerd-linkerd-viz-web-check ClusterRole/linkerd-linke Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create clusterrolebindings +kubectl auth can-i create clusterrolebindings yes ``` @@ -1865,7 +1865,7 @@ requirements in the cluster: Ensure all the linkerd-viz pods are injected ```bash -$ kubectl -n linkerd-viz get pods +kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1889,7 +1889,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-viz pods are running with 2/2 ```bash -$ kubectl -n linkerd-viz get pods +kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1994,7 +1994,7 @@ Example failure: Ensure the linkerd-jaeger ServiceAccounts exist: ```bash -$ kubectl -n linkerd-jaeger get serviceaccounts +kubectl -n linkerd-jaeger get serviceaccounts NAME SECRETS AGE collector 1 23m jaeger 1 23m @@ -2004,7 +2004,7 @@ Also ensure you have permission to create ServiceAccounts in the linkerd-jaeger namespace: ```bash -$ kubectl -n linkerd-jaeger auth can-i create serviceaccounts +kubectl -n linkerd-jaeger auth can-i create serviceaccounts yes ``` @@ -2021,7 +2021,7 @@ Example failure: Ensure the Linkerd ConfigMap exists: ```bash -$ kubectl -n linkerd-jaeger get configmap/collector-config +kubectl -n linkerd-jaeger get configmap/collector-config NAME DATA AGE collector-config 1 61m ``` @@ -2029,7 +2029,7 @@ collector-config 1 61m Also ensure you have permission to create ConfigMaps: ```bash -$ kubectl -n linkerd-jaeger auth can-i create configmap +kubectl -n linkerd-jaeger auth can-i create configmap yes ``` @@ -2044,7 +2044,7 @@ yes Ensure all the 
jaeger pods are injected ```bash -$ kubectl -n linkerd-jaeger get pods +kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE collector-69cc44dfbc-rhpfg 2/2 Running 0 11s jaeger-6f98d5c979-scqlq 2/2 Running 0 11s @@ -2065,7 +2065,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-jaeger pods are running with 2/2 ```bash -$ kubectl -n linkerd-jaeger get pods +kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE jaeger-injector-548684d74b-bcq5h 2/2 Running 0 5s collector-69cc44dfbc-wqf6s 2/2 Running 0 5s @@ -2114,7 +2114,7 @@ Ensure you can connect to the Linkerd Buoyant version check endpoint from the environment the `linkerd` cli is running: ```bash -$ curl https://buoyant.cloud/version.json +curl https://buoyant.cloud/version.json {"linkerd-buoyant":"v0.4.4"} ``` @@ -2179,7 +2179,7 @@ linkerd-buoyant install | kubectl apply -f - Ensure that the cluster role exists: ```bash -$ kubectl get clusterrole buoyant-cloud-agent +kubectl get clusterrole buoyant-cloud-agent NAME CREATED AT buoyant-cloud-agent 2020-11-13T00:59:50Z ``` @@ -2187,7 +2187,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create ClusterRoles +kubectl auth can-i create ClusterRoles yes ``` @@ -2202,7 +2202,7 @@ yes Ensure that the cluster role binding exists: ```bash -$ kubectl get clusterrolebinding buoyant-cloud-agent +kubectl get clusterrolebinding buoyant-cloud-agent NAME ROLE AGE buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d ``` @@ -2210,7 +2210,7 @@ buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create ClusterRoleBindings +kubectl auth can-i create ClusterRoleBindings yes ``` @@ -2225,7 +2225,7 @@ yes Ensure that the service account exists: ```bash -$ kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent +kubectl -n 
buoyant-cloud get serviceaccount buoyant-cloud-agent NAME SECRETS AGE buoyant-cloud-agent 1 301d ``` @@ -2233,7 +2233,7 @@ buoyant-cloud-agent 1 301d Also ensure you have permission to create ServiceAccounts: ```bash -$ kubectl -n buoyant-cloud auth can-i create ServiceAccount +kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2248,7 +2248,7 @@ yes Ensure that the secret exists: ```bash -$ kubectl -n buoyant-cloud get secret buoyant-cloud-id +kubectl -n buoyant-cloud get secret buoyant-cloud-id NAME TYPE DATA AGE buoyant-cloud-id Opaque 4 301d ``` @@ -2256,7 +2256,7 @@ buoyant-cloud-id Opaque 4 301d Also ensure you have permission to create ServiceAccounts: ```bash -$ kubectl -n buoyant-cloud auth can-i create ServiceAccount +kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2294,7 +2294,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-agent` Deployment with: ```bash -$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent +kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 156m ``` @@ -2317,7 +2317,7 @@ Ensure the `buoyant-cloud-agent` pod is injected, the `READY` column should show `2/2`: ```bash -$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent +kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 161m ``` @@ -2336,7 +2336,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -$ linkerd-buoyant version +linkerd-buoyant version CLI version: v0.4.4 Agent version: v0.4.4 ``` @@ -2395,7 +2395,7 @@ everything to start up. 
If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-metrics` DaemonSet with: ```bash -$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics +kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 163m buoyant-cloud-metrics-q8jhj 2/2 Running 0 163m @@ -2421,7 +2421,7 @@ Ensure the `buoyant-cloud-metrics` pods are injected, the `READY` column should show `2/2`: ```bash -$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics +kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 166m buoyant-cloud-metrics-q8jhj 2/2 Running 0 166m @@ -2443,7 +2443,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' +kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' {"app.kubernetes.io/name":"metrics","app.kubernetes.io/part-of":"buoyant-cloud","app.kubernetes.io/version":"v0.4.4"} ``` diff --git a/linkerd.io/content/2.12/reference/cli/check.md b/linkerd.io/content/2.12/reference/cli/check.md index 7cd61cd237..67a2486908 100644 --- a/linkerd.io/content/2.12/reference/cli/check.md +++ b/linkerd.io/content/2.12/reference/cli/check.md @@ -12,7 +12,7 @@ for a full list of all the possible checks, what they do and how to fix them. 
## Example output ```bash -$ linkerd check +linkerd check kubernetes-api -------------- √ can initialize the client diff --git a/linkerd.io/content/2.12/reference/iptables.md b/linkerd.io/content/2.12/reference/iptables.md index 67a7ea89de..9b4d229a59 100644 --- a/linkerd.io/content/2.12/reference/iptables.md +++ b/linkerd.io/content/2.12/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -$ kubectl -n logs linkerd-init +kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2.12/tasks/configuring-per-route-policy.md b/linkerd.io/content/2.12/tasks/configuring-per-route-policy.md index 24606035be..fc1f8477be 100644 --- a/linkerd.io/content/2.12/tasks/configuring-per-route-policy.md +++ b/linkerd.io/content/2.12/tasks/configuring-per-route-policy.md @@ -30,7 +30,7 @@ haven't already done this. Inject and install the Books demo application: ```bash -$ kubectl create ns booksapp && \ +kubectl create ns booksapp && \ curl --proto '=https' --tlsv1.2 -sSfL https://run.linkerd.io/booksapp.yml \ | linkerd inject - \ | kubectl -n booksapp apply -f - @@ -44,21 +44,21 @@ run in the `booksapp` namespace. 
Confirm that the Linkerd data plane was injected successfully: ```bash -$ linkerd check -n booksapp --proxy -o short +linkerd check -n booksapp --proxy -o short ``` You can take a quick look at all the components that were added to your cluster by running: ```bash -$ kubectl -n booksapp get all +kubectl -n booksapp get all ``` Once the rollout has completed successfully, you can access the app itself by port-forwarding `webapp` locally: ```bash -$ kubectl -n booksapp port-forward svc/webapp 7000 & +kubectl -n booksapp port-forward svc/webapp 7000 & ``` Open [http://localhost:7000/](http://localhost:7000/) in your browser to see the @@ -87,7 +87,7 @@ First, let's run the `linkerd viz authz` command to list the authorization resources that currently exist for the `authors` deployment: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated default/all-unauthenticated 0.0rps 70.31% 8.1rps 1ms 43ms 49ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.3rps 1ms 1ms 1ms @@ -124,7 +124,7 @@ Now that we've defined a [`Server`] for the authors `Deployment`, we can run the currently unauthorized: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default authors-server 9.5rps 0.00% 0.0rps 0ms 0ms 0ms probe authors-server default/probe 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -291,7 +291,7 @@ network (0.0.0.0). 
Running `linkerd viz authz` again, we can now see that our new policies exist: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 2ms 2ms 2ms authors-probe-route authors-server authorizationpolicy/authors-probe-policy 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -362,7 +362,7 @@ requests, but we haven't _authorized_ requests to that route. Running the requests to `authors-modify-route`: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy - - - - - - authors-modify-route authors-server 9.7rps 0.00% 0.0rps 0ms 0ms 0ms @@ -421,7 +421,7 @@ Running the `linkerd viz authz` command one last time, we now see that all traffic is authorized: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 0ms 0ms 0ms authors-modify-route authors-server authorizationpolicy/authors-modify-policy 0.0rps 100.00% 0.0rps 0ms 0ms 0ms diff --git a/linkerd.io/content/2.12/tasks/getting-per-route-metrics.md b/linkerd.io/content/2.12/tasks/getting-per-route-metrics.md index ddd2a4dc3c..9f66470e28 100644 --- a/linkerd.io/content/2.12/tasks/getting-per-route-metrics.md +++ b/linkerd.io/content/2.12/tasks/getting-per-route-metrics.md @@ -24,7 +24,7 @@ per-route authorization. 
You can view per-route metrics in the CLI by running `linkerd viz routes`: ```bash -$ linkerd viz routes svc/webapp +linkerd viz routes svc/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 GET / webapp 100.00% 0.6rps 25ms 30ms 30ms GET /authors/{id} webapp 100.00% 0.6rps 22ms 29ms 30ms @@ -44,7 +44,7 @@ specified in your service profile will end up there. It is also possible to look the metrics up by other resource types, such as: ```bash -$ linkerd viz routes deploy/webapp +linkerd viz routes deploy/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 [DEFAULT] kubernetes 0.00% 0.0rps 0ms 0ms 0ms GET / webapp 100.00% 0.5rps 27ms 38ms 40ms @@ -63,7 +63,7 @@ Then, it is possible to filter all the way down to requests going from a specific resource to other services: ```bash -$ linkerd viz routes deploy/webapp --to svc/books +linkerd viz routes deploy/webapp --to svc/books ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 DELETE /books/{id}.json books 100.00% 0.5rps 18ms 29ms 30ms GET /books.json books 100.00% 1.1rps 7ms 12ms 18ms diff --git a/linkerd.io/content/2.12/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.12/tasks/multicluster-using-statefulsets.md index 9d8730b5b0..c720c09563 100644 --- a/linkerd.io/content/2.12/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.12/tasks/multicluster-using-statefulsets.md @@ -48,8 +48,8 @@ The first step is to clone the demo repository on your local machine. ```sh # clone example repository -$ git clone git@github.com:mateiidavid/l2d-k3d-statefulset.git -$ cd l2d-k3d-statefulset +git clone git@github.com:mateiidavid/l2d-k3d-statefulset.git +cd l2d-k3d-statefulset ``` The second step consists of creating two `k3d` clusters named `east` and `west`, @@ -60,10 +60,10 @@ everything. 
```sh # create k3d clusters -$ ./create.sh +./create.sh # list the clusters -$ k3d cluster list +k3d cluster list NAME SERVERS AGENTS LOADBALANCER east 1/1 0/0 true west 1/1 0/0 true @@ -78,10 +78,10 @@ provided scripts, but feel free to have a look! ```sh # Install Linkerd and multicluster, output to check should be a success -$ ./install.sh +./install.sh # Next, link the two clusters together -$ ./link.sh +./link.sh ``` Perfect! If you've made it this far with no errors, then it's a good sign. In @@ -101,17 +101,17 @@ communication. First, we will deploy our pods and services: ```sh # deploy services and mesh namespaces -$ ./deploy.sh +./deploy.sh # verify both clusters # # verify east -$ kubectl --context=k3d-east get pods +kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 7s # verify west has headless service -$ kubectl --context=k3d-west get services +kubectl --context=k3d-west get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 10m nginx-svc ClusterIP None 80/TCP 8s @@ -119,7 +119,7 @@ nginx-svc ClusterIP None 80/TCP 8s # verify west has statefulset # # this may take a while to come up -$ kubectl --context=k3d-west get pods +kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 53s nginx-set-1 2/2 Running 0 43s @@ -130,7 +130,7 @@ Before we go further, let's have a look at the endpoints object for the `nginx-svc`: ```sh -$ kubectl --context=k3d-west get endpoints nginx-svc -o yaml +kubectl --context=k3d-west get endpoints nginx-svc -o yaml ... subsets: - addresses: @@ -170,23 +170,23 @@ would get an answer back. 
We can test this out by applying the curl pod to the `west` cluster: ```sh -$ kubectl --context=k3d-west apply -f east/curl.yml -$ kubectl --context=k3d-west get pods +kubectl --context=k3d-west apply -f east/curl.yml +kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 5m8s nginx-set-1 2/2 Running 0 4m58s nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s -$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh -/$ # prompt for curl pod +kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh +# prompt for curl pod ``` If we now curl one of these instances, we will get back a response. ```sh # exec'd on the pod -/ $ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local +curl nginx-set-0.nginx-svc.default.svc.west.cluster.local " @@ -218,10 +218,10 @@ Now, let's do the same, but this time from the `east` cluster. We will first export the service. ```sh -$ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" +kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled -$ kubectl --context=k3d-east get services +kubectl --context=k3d-east get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 20h nginx-svc-west ClusterIP None 80/TCP 29s @@ -235,7 +235,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname will point to one of the services we see above: ```sh -$ kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml +kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml subsets: - addresses: - hostname: nginx-set-0 @@ -251,17 +251,17 @@ cluster (`west`), will be mirrored as a clusterIP service. We will see in a second why this matters. 
```sh -$ kubectl --context=k3d-east get pods +kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -$ kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh +kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. # -/ $ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local +curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local @@ -329,8 +329,8 @@ validation. To clean-up, you can remove both clusters entirely using the k3d CLI: ```sh -$ k3d cluster delete east +k3d cluster delete east cluster east deleted -$ k3d cluster delete west +k3d cluster delete west cluster west deleted ``` 
``` diff --git a/linkerd.io/content/2.12/tasks/securing-linkerd-tap.md b/linkerd.io/content/2.12/tasks/securing-linkerd-tap.md index d3023ec39f..f66601f5dd 100644 --- a/linkerd.io/content/2.12/tasks/securing-linkerd-tap.md +++ b/linkerd.io/content/2.12/tasks/securing-linkerd-tap.md @@ -57,7 +57,7 @@ kubectl auth can-i watch deployments.tap.linkerd.io -n emojivoto --as $(whoami) You can also use the Linkerd CLI's `--as` flag to confirm: ```bash -$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) Cannot connect to Linkerd Viz: namespaces is forbidden: User "XXXX" cannot list resource "namespaces" in API group "" at the cluster scope Validate the install with: linkerd viz check ... @@ -74,7 +74,7 @@ To enable tap access to all resources in all namespaces, you may bind your user to the `linkerd-linkerd-tap-admin` ClusterRole, installed by default: ```bash -$ kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin +kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin Name: linkerd-linkerd-viz-tap-admin Labels: component=tap linkerd.io/extension=viz @@ -106,7 +106,7 @@ kubectl create clusterrolebinding \ You can verify you now have tap access with: ```bash -$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) req id=3:0 proxy=in src=10.244.0.1:37392 dst=10.244.0.13:9996 tls=not_provided_by_remote :method=GET :authority=10.244.0.13:9996 :path=/ping ... ``` @@ -140,14 +140,14 @@ Because GCloud provides this additional level of access, there are cases where not. 
To validate this, check whether your GCloud user has Tap access: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces yes ``` And then validate whether your RBAC user has Tap access: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) no - no RBAC policy matched ``` @@ -184,14 +184,14 @@ privileges necessary to tap resources. To confirm: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web yes ``` This access is enabled via a `linkerd-linkerd-viz-web-admin` ClusterRoleBinding: ```bash -$ kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin +kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin Name: linkerd-linkerd-viz-web-admin Labels: component=web linkerd.io/extensions=viz @@ -224,6 +224,6 @@ kubectl delete clusterrolebindings/linkerd-linkerd-viz-web-admin To confirm: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web no ``` diff --git a/linkerd.io/content/2.12/tasks/troubleshooting.md b/linkerd.io/content/2.12/tasks/troubleshooting.md index 7ec6896a2d..8e7bf49e3c 100644 --- a/linkerd.io/content/2.12/tasks/troubleshooting.md +++ b/linkerd.io/content/2.12/tasks/troubleshooting.md @@ -230,7 +230,7 @@ Example failure: Ensure the Linkerd ClusterRoles exist: ```bash -$ kubectl get clusterroles | grep linkerd +kubectl get clusterroles | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -240,7 +240,7 @@ 
linkerd-policy 9d Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +kubectl auth can-i create clusterroles yes ``` @@ -257,7 +257,7 @@ Example failure: Ensure the Linkerd ClusterRoleBindings exist: ```bash -$ kubectl get clusterrolebindings | grep linkerd +kubectl get clusterrolebindings | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -267,7 +267,7 @@ linkerd-destination-policy 9d Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create clusterrolebindings +kubectl auth can-i create clusterrolebindings yes ``` @@ -284,7 +284,7 @@ Example failure: Ensure the Linkerd ServiceAccounts exist: ```bash -$ kubectl -n linkerd get serviceaccounts +kubectl -n linkerd get serviceaccounts NAME SECRETS AGE default 1 14m linkerd-destination 1 14m @@ -297,7 +297,7 @@ Also ensure you have permission to create ServiceAccounts in the Linkerd namespace: ```bash -$ kubectl -n linkerd auth can-i create serviceaccounts +kubectl -n linkerd auth can-i create serviceaccounts yes ``` @@ -314,7 +314,7 @@ Example failure: Ensure the Linkerd CRD exists: ```bash -$ kubectl get customresourcedefinitions +kubectl get customresourcedefinitions NAME CREATED AT serviceprofiles.linkerd.io 2019-04-25T21:47:31Z ``` @@ -322,7 +322,7 @@ serviceprofiles.linkerd.io 2019-04-25T21:47:31Z Also ensure you have permission to create CRDs: ```bash -$ kubectl auth can-i create customresourcedefinitions +kubectl auth can-i create customresourcedefinitions yes ``` @@ -339,14 +339,14 @@ Example failure: Ensure the Linkerd MutatingWebhookConfigurations exists: ```bash -$ kubectl get mutatingwebhookconfigurations | grep linkerd +kubectl get mutatingwebhookconfigurations | grep linkerd linkerd-proxy-injector-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create MutatingWebhookConfigurations: ```bash -$ kubectl auth can-i create 
mutatingwebhookconfigurations +kubectl auth can-i create mutatingwebhookconfigurations yes ``` @@ -363,14 +363,14 @@ Example failure: Ensure the Linkerd ValidatingWebhookConfiguration exists: ```bash -$ kubectl get validatingwebhookconfigurations | grep linkerd +kubectl get validatingwebhookconfigurations | grep linkerd linkerd-sp-validator-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create ValidatingWebhookConfigurations: ```bash -$ kubectl auth can-i create validatingwebhookconfigurations +kubectl auth can-i create validatingwebhookconfigurations yes ``` @@ -418,7 +418,7 @@ Example failure: Ensure the Linkerd ConfigMap exists: ```bash -$ kubectl -n linkerd get configmap/linkerd-config +kubectl -n linkerd get configmap/linkerd-config NAME DATA AGE linkerd-config 3 61m ``` @@ -426,7 +426,7 @@ linkerd-config 3 61m Also ensure you have permission to create ConfigMaps: ```bash -$ kubectl -n linkerd auth can-i create configmap +kubectl -n linkerd auth can-i create configmap yes ``` @@ -780,7 +780,7 @@ Example failure: Verify the state of the control plane pods with: ```bash -$ kubectl -n linkerd get po +kubectl -n linkerd get po NAME READY STATUS RESTARTS AGE linkerd-destination-5fd7b5d466-szgqm 2/2 Running 1 12m linkerd-identity-54df78c479-hbh5m 2/2 Running 0 12m @@ -862,7 +862,7 @@ Ensure you can connect to the Linkerd version check endpoint from the environment the `linkerd` cli is running: ```bash -$ curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" +curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" {"stable":"stable-2.1.0","edge":"edge-19.1.2"} ``` @@ -921,7 +921,7 @@ normally. Example failure: ```bash -$ linkerd check --proxy --namespace foo +linkerd check --proxy --namespace foo ... 
× data plane namespace exists The "foo" namespace does not exist @@ -1045,7 +1045,7 @@ Ensure the kube-system namespace has the `config.linkerd.io/admission-webhooks:disabled` label: ```bash -$ kubectl get namespace kube-system -oyaml +kubectl get namespace kube-system -oyaml kind: Namespace apiVersion: v1 metadata: @@ -1118,7 +1118,7 @@ Example error: Ensure that the linkerd-cni-config ConfigMap exists in the CNI namespace: ```bash -$ kubectl get cm linkerd-cni-config -n linkerd-cni +kubectl get cm linkerd-cni-config -n linkerd-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1126,7 +1126,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create ConfigMaps: ```bash -$ kubectl auth can-i create ConfigMaps +kubectl auth can-i create ConfigMaps yes ``` @@ -1143,7 +1143,7 @@ Example error: Ensure that the cluster role exists: ```bash -$ kubectl get clusterrole linkerd-cni +kubectl get clusterrole linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1151,7 +1151,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create ClusterRoles +kubectl auth can-i create ClusterRoles yes ``` @@ -1168,7 +1168,7 @@ Example error: Ensure that the cluster role binding exists: ```bash -$ kubectl get clusterrolebinding linkerd-cni +kubectl get clusterrolebinding linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1176,7 +1176,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create ClusterRoleBindings +kubectl auth can-i create ClusterRoleBindings yes ``` @@ -1193,7 +1193,7 @@ Example error: Ensure that the CNI service account exists in the CNI namespace: ```bash -$ kubectl get ServiceAccount linkerd-cni -n linkerd-cni +kubectl get ServiceAccount linkerd-cni -n linkerd-cni NAME SECRETS AGE linkerd-cni 1 45m ``` 
@@ -1201,7 +1201,7 @@ linkerd-cni 1 45m Also ensure you have permission to create ServiceAccount: ```bash -$ kubectl auth can-i create ServiceAccounts -n linkerd-cni +kubectl auth can-i create ServiceAccounts -n linkerd-cni yes ``` @@ -1218,7 +1218,7 @@ Example error: Ensure that the CNI daemonset exists in the CNI namespace: ```bash -$ kubectl get ds -n linkerd-cni +kubectl get ds -n linkerd-cni NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE linkerd-cni 1 1 1 1 1 beta.kubernetes.io/os=linux 14m ``` @@ -1226,7 +1226,7 @@ linkerd-cni 1 1 1 1 1 beta.kubernet Also ensure you have permission to create DaemonSets: ```bash -$ kubectl auth can-i create DaemonSets -n linkerd-cni +kubectl auth can-i create DaemonSets -n linkerd-cni yes ``` @@ -1243,7 +1243,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -$ kubectl get po -n linkerd-cn +kubectl get po -n linkerd-cn NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s @@ -1253,7 +1253,7 @@ linkerd-cni-p5670 1/1 Running 0 9m25s Ensure that all pods have finished the deployment of the CNI config and binary: ```bash -$ kubectl logs linkerd-cni-rzp2q -n linkerd-cni +kubectl logs linkerd-cni-rzp2q -n linkerd-cni Wrote linkerd CNI binaries to /host/opt/cni/bin Created CNI config /host/etc/cni/net.d/10-kindnet.conflist Done configuring CNI. Sleep=true @@ -1281,7 +1281,7 @@ Make sure multicluster extension is correctly installed and that the `links.multicluster.linkerd.io` CRD is present. ```bash -$ kubectl get crds | grep multicluster +kubectl get crds | grep multicluster NAME CREATED AT links.multicluster.linkerd.io 2021-03-10T09:58:10Z ``` @@ -1360,7 +1360,7 @@ the rules section. 
Expected rules for `linkerd-service-mirror-access-local-resources` cluster role: ```bash -$ kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml +kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml kind: ClusterRole metadata: labels: @@ -1393,7 +1393,7 @@ rules: Expected rules for `linkerd-service-mirror-read-remote-creds` role: ```bash -$ kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml +kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml kind: Role metadata: labels: @@ -1426,7 +1426,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the controller pod with: ```bash -$ kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror +kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror NAME READY STATUS RESTARTS AGE linkerd-service-mirror-7bb8ff5967-zg265 2/2 Running 0 50m ``` @@ -1544,7 +1544,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoles exist: ```bash -$ kubectl get clusterroles | grep linkerd-viz +kubectl get clusterroles | grep linkerd-viz linkerd-linkerd-viz-metrics-api 2021-01-26T18:02:17Z linkerd-linkerd-viz-prometheus 2021-01-26T18:02:17Z linkerd-linkerd-viz-tap 2021-01-26T18:02:17Z @@ -1555,7 +1555,7 @@ linkerd-linkerd-viz-web-check 2021-01-2 Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +kubectl auth can-i create clusterroles yes ``` @@ -1572,7 +1572,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoleBindings exist: ```bash -$ kubectl get clusterrolebindings | grep linkerd-viz +kubectl get clusterrolebindings | grep linkerd-viz linkerd-linkerd-viz-metrics-api ClusterRole/linkerd-linkerd-viz-metrics-api 18h linkerd-linkerd-viz-prometheus 
ClusterRole/linkerd-linkerd-viz-prometheus 18h linkerd-linkerd-viz-tap ClusterRole/linkerd-linkerd-viz-tap 18h @@ -1584,7 +1584,7 @@ linkerd-linkerd-viz-web-check ClusterRole/linkerd-linke Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create clusterrolebindings +kubectl auth can-i create clusterrolebindings yes ``` @@ -1673,7 +1673,7 @@ requirements in the cluster: Ensure all the linkerd-viz pods are injected ```bash -$ kubectl -n linkerd-viz get pods +kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1697,7 +1697,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-viz pods are running with 2/2 ```bash -$ kubectl -n linkerd-viz get pods +kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1880,7 +1880,7 @@ versions in sync by updating either the CLI or linkerd-jaeger as necessary. 
Ensure all the jaeger pods are injected ```bash -$ kubectl -n linkerd-jaeger get pods +kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE collector-69cc44dfbc-rhpfg 2/2 Running 0 11s jaeger-6f98d5c979-scqlq 2/2 Running 0 11s @@ -1901,7 +1901,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-jaeger pods are running with 2/2 ```bash -$ kubectl -n linkerd-jaeger get pods +kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE jaeger-injector-548684d74b-bcq5h 2/2 Running 0 5s collector-69cc44dfbc-wqf6s 2/2 Running 0 5s @@ -1950,7 +1950,7 @@ Ensure you can connect to the Linkerd Buoyant version check endpoint from the environment the `linkerd` cli is running: ```bash -$ curl https://buoyant.cloud/version.json +curl https://buoyant.cloud/version.json {"linkerd-buoyant":"v0.4.4"} ``` @@ -2015,7 +2015,7 @@ linkerd-buoyant install | kubectl apply -f - Ensure that the cluster role exists: ```bash -$ kubectl get clusterrole buoyant-cloud-agent +kubectl get clusterrole buoyant-cloud-agent NAME CREATED AT buoyant-cloud-agent 2020-11-13T00:59:50Z ``` @@ -2023,7 +2023,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create ClusterRoles +kubectl auth can-i create ClusterRoles yes ``` @@ -2038,7 +2038,7 @@ yes Ensure that the cluster role binding exists: ```bash -$ kubectl get clusterrolebinding buoyant-cloud-agent +kubectl get clusterrolebinding buoyant-cloud-agent NAME ROLE AGE buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d ``` @@ -2046,7 +2046,7 @@ buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create ClusterRoleBindings +kubectl auth can-i create ClusterRoleBindings yes ``` @@ -2061,7 +2061,7 @@ yes Ensure that the service account exists: ```bash -$ kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent 
+kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent NAME SECRETS AGE buoyant-cloud-agent 1 301d ``` @@ -2069,7 +2069,7 @@ buoyant-cloud-agent 1 301d Also ensure you have permission to create ServiceAccounts: ```bash -$ kubectl -n buoyant-cloud auth can-i create ServiceAccount +kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2084,7 +2084,7 @@ yes Ensure that the secret exists: ```bash -$ kubectl -n buoyant-cloud get secret buoyant-cloud-id +kubectl -n buoyant-cloud get secret buoyant-cloud-id NAME TYPE DATA AGE buoyant-cloud-id Opaque 4 301d ``` @@ -2092,7 +2092,7 @@ buoyant-cloud-id Opaque 4 301d Also ensure you have permission to create ServiceAccounts: ```bash -$ kubectl -n buoyant-cloud auth can-i create ServiceAccount +kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2130,7 +2130,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-agent` Deployment with: ```bash -$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent +kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 156m ``` @@ -2153,7 +2153,7 @@ Ensure the `buoyant-cloud-agent` pod is injected, the `READY` column should show `2/2`: ```bash -$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent +kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 161m ``` @@ -2172,7 +2172,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -$ linkerd-buoyant version +linkerd-buoyant version CLI version: v0.4.4 Agent version: v0.4.4 ``` @@ -2231,7 +2231,7 @@ everything to start up. 
If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-metrics` DaemonSet with: ```bash -$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics +kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 163m buoyant-cloud-metrics-q8jhj 2/2 Running 0 163m @@ -2257,7 +2257,7 @@ Ensure the `buoyant-cloud-metrics` pods are injected, the `READY` column should show `2/2`: ```bash -$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics +kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 166m buoyant-cloud-metrics-q8jhj 2/2 Running 0 166m @@ -2279,7 +2279,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' +kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' {"app.kubernetes.io/name":"metrics","app.kubernetes.io/part-of":"buoyant-cloud","app.kubernetes.io/version":"v0.4.4"} ``` diff --git a/linkerd.io/content/2.12/tasks/upgrade.md b/linkerd.io/content/2.12/tasks/upgrade.md index 7e608fd341..8f78d275c3 100644 --- a/linkerd.io/content/2.12/tasks/upgrade.md +++ b/linkerd.io/content/2.12/tasks/upgrade.md @@ -290,7 +290,7 @@ Find the release name you used for the `linkerd2` chart, and the namespace where this release stored its config: ```bash -$ helm ls -A +helm ls -A NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION linkerd default 1 2021-11-22 17:14:50.751436374 -0500 -05 deployed linkerd2-2.11.1 stable-2.11.1 ``` @@ -323,18 +323,18 @@ the `linkerd-crds`, `linkerd-control-plane` and `linkerd-smi` charts: ```bash # First migrate the CRDs -$ helm -n default get manifest linkerd | \ +helm -n default get manifest linkerd | \ yq 'select(.kind == 
"CustomResourceDefinition") | .metadata.name' | \ grep -v '\-\-\-' | \ xargs -n1 sh -c \ 'kubectl annotate --overwrite crd/$0 meta.helm.sh/release-name=linkerd-crds meta.helm.sh/release-namespace=linkerd' # Special case for TrafficSplit (only use if you have TrafficSplit CRs) -$ kubectl annotate --overwrite crd/trafficsplits.split.smi-spec.io \ +kubectl annotate --overwrite crd/trafficsplits.split.smi-spec.io \ meta.helm.sh/release-name=linkerd-smi meta.helm.sh/release-namespace=linkerd-smi # Now migrate all the other resources -$ helm -n default get manifest linkerd | \ +helm -n default get manifest linkerd | \ yq 'select(.kind != "CustomResourceDefinition")' | \ yq '.kind, .metadata.name, .metadata.namespace' | \ grep -v '\-\-\-' | @@ -348,14 +348,14 @@ above. ```bash # First make sure you update the helm repo -$ helm repo up +helm repo up # Install the linkerd-crds chart -$ helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds +helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds # Install the linkerd-control-plane chart # (remember to add any customizations you retrieved above) -$ helm install linkerd-control-plane \ +helm install linkerd-control-plane \ -n linkerd \ --set-file identityTrustAnchorsPEM=ca.crt \ --set-file identity.issuer.tls.crtPEM=issuer.crt \ @@ -363,8 +363,8 @@ $ helm install linkerd-control-plane \ linkerd/linkerd-control-plane # Optional: if using TrafficSplit CRs -$ helm repo add l5d-smi https://linkerd.github.io/linkerd-smi -$ helm install linkerd-smi -n linkerd-smi --create-namespace l5d-smi/linkerd-smi +helm repo add l5d-smi https://linkerd.github.io/linkerd-smi +helm install linkerd-smi -n linkerd-smi --create-namespace l5d-smi/linkerd-smi ``` ##### Cleaning up the old linkerd2 Helm release @@ -375,7 +375,7 @@ remove the Helm release config for the old `linkerd2` chart (assuming you used the "Secret" storage backend, which is the default): ```bash -$ kubectl -n default delete secret \ 
+kubectl -n default delete secret \ --field-selector type=helm.sh/release.v1 \ -l name=linkerd,owner=helm ``` diff --git a/linkerd.io/content/2.13/reference/cli/check.md b/linkerd.io/content/2.13/reference/cli/check.md index 7cd61cd237..67a2486908 100644 --- a/linkerd.io/content/2.13/reference/cli/check.md +++ b/linkerd.io/content/2.13/reference/cli/check.md @@ -12,7 +12,7 @@ for a full list of all the possible checks, what they do and how to fix them. ## Example output ```bash -$ linkerd check +linkerd check kubernetes-api -------------- √ can initialize the client diff --git a/linkerd.io/content/2.13/reference/iptables.md b/linkerd.io/content/2.13/reference/iptables.md index 67a7ea89de..9b4d229a59 100644 --- a/linkerd.io/content/2.13/reference/iptables.md +++ b/linkerd.io/content/2.13/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -$ kubectl -n logs linkerd-init +kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2.13/tasks/configuring-dynamic-request-routing.md b/linkerd.io/content/2.13/tasks/configuring-dynamic-request-routing.md index 8137e79797..af753bbcf7 100644 --- a/linkerd.io/content/2.13/tasks/configuring-dynamic-request-routing.md +++ b/linkerd.io/content/2.13/tasks/configuring-dynamic-request-routing.md @@ -67,7 +67,7 @@ Requests to `/echo` on port 9898 to the frontend pod will get forwarded the pod pointed by the Service `backend-a-podinfo`: ```bash -$ curl -sX POST localhost:9898/echo \ +curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' PODINFO_UI_MESSAGE=A backend @@ -142,7 +142,7 @@ to the `backend-a-podinfo` Service. The previous requests should still reach `backend-a-podinfo` only: ```bash -$ curl -sX POST localhost:9898/echo \ +curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. 
backend' PODINFO_UI_MESSAGE=A backend @@ -152,7 +152,7 @@ But if we add the "`x-request-id: alternative`" header they get routed to `backend-b-podinfo`: ```bash -$ curl -sX POST \ +curl -sX POST \ -H 'x-request-id: alternative' \ localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' diff --git a/linkerd.io/content/2.13/tasks/configuring-per-route-policy.md b/linkerd.io/content/2.13/tasks/configuring-per-route-policy.md index 018f3a706a..30e18a67c1 100644 --- a/linkerd.io/content/2.13/tasks/configuring-per-route-policy.md +++ b/linkerd.io/content/2.13/tasks/configuring-per-route-policy.md @@ -30,7 +30,7 @@ haven't already done this. Inject and install the Books demo application: ```bash -$ kubectl create ns booksapp && \ +kubectl create ns booksapp && \ curl --proto '=https' --tlsv1.2 -sSfL https://run.linkerd.io/booksapp.yml \ | linkerd inject - \ | kubectl -n booksapp apply -f - @@ -44,21 +44,21 @@ run in the `booksapp` namespace. Confirm that the Linkerd data plane was injected successfully: ```bash -$ linkerd check -n booksapp --proxy -o short +linkerd check -n booksapp --proxy -o short ``` You can take a quick look at all the components that were added to your cluster by running: ```bash -$ kubectl -n booksapp get all +kubectl -n booksapp get all ``` Once the rollout has completed successfully, you can access the app itself by port-forwarding `webapp` locally: ```bash -$ kubectl -n booksapp port-forward svc/webapp 7000 & +kubectl -n booksapp port-forward svc/webapp 7000 & ``` Open [http://localhost:7000/](http://localhost:7000/) in your browser to see the @@ -87,7 +87,7 @@ First, let's run the `linkerd viz authz` command to list the authorization resources that currently exist for the `authors` deployment: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated 
default/all-unauthenticated 0.0rps 70.31% 8.1rps 1ms 43ms 49ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.3rps 1ms 1ms 1ms @@ -124,7 +124,7 @@ Now that we've defined a [`Server`] for the authors `Deployment`, we can run the currently unauthorized: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default authors-server 9.5rps 0.00% 0.0rps 0ms 0ms 0ms probe authors-server default/probe 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -291,7 +291,7 @@ network (0.0.0.0). Running `linkerd viz authz` again, we can now see that our new policies exist: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 2ms 2ms 2ms authors-probe-route authors-server authorizationpolicy/authors-probe-policy 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -362,7 +362,7 @@ requests, but we haven't _authorized_ requests to that route. 
Running the requests to `authors-modify-route`: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy - - - - - - authors-modify-route authors-server 9.7rps 0.00% 0.0rps 0ms 0ms 0ms @@ -421,7 +421,7 @@ Running the `linkerd viz authz` command one last time, we now see that all traffic is authorized: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 0ms 0ms 0ms authors-modify-route authors-server authorizationpolicy/authors-modify-policy 0.0rps 100.00% 0.0rps 0ms 0ms 0ms diff --git a/linkerd.io/content/2.13/tasks/getting-per-route-metrics.md b/linkerd.io/content/2.13/tasks/getting-per-route-metrics.md index ddd2a4dc3c..9f66470e28 100644 --- a/linkerd.io/content/2.13/tasks/getting-per-route-metrics.md +++ b/linkerd.io/content/2.13/tasks/getting-per-route-metrics.md @@ -24,7 +24,7 @@ per-route authorization. You can view per-route metrics in the CLI by running `linkerd viz routes`: ```bash -$ linkerd viz routes svc/webapp +linkerd viz routes svc/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 GET / webapp 100.00% 0.6rps 25ms 30ms 30ms GET /authors/{id} webapp 100.00% 0.6rps 22ms 29ms 30ms @@ -44,7 +44,7 @@ specified in your service profile will end up there. 
It is also possible to look the metrics up by other resource types, such as: ```bash -$ linkerd viz routes deploy/webapp +linkerd viz routes deploy/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 [DEFAULT] kubernetes 0.00% 0.0rps 0ms 0ms 0ms GET / webapp 100.00% 0.5rps 27ms 38ms 40ms @@ -63,7 +63,7 @@ Then, it is possible to filter all the way down to requests going from a specific resource to other services: ```bash -$ linkerd viz routes deploy/webapp --to svc/books +linkerd viz routes deploy/webapp --to svc/books ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 DELETE /books/{id}.json books 100.00% 0.5rps 18ms 29ms 30ms GET /books.json books 100.00% 1.1rps 7ms 12ms 18ms diff --git a/linkerd.io/content/2.13/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.13/tasks/multicluster-using-statefulsets.md index 9d8730b5b0..c720c09563 100644 --- a/linkerd.io/content/2.13/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.13/tasks/multicluster-using-statefulsets.md @@ -48,8 +48,8 @@ The first step is to clone the demo repository on your local machine. ```sh # clone example repository -$ git clone git@github.com:mateiidavid/l2d-k3d-statefulset.git -$ cd l2d-k3d-statefulset +git clone git@github.com:mateiidavid/l2d-k3d-statefulset.git +cd l2d-k3d-statefulset ``` The second step consists of creating two `k3d` clusters named `east` and `west`, @@ -60,10 +60,10 @@ everything. ```sh # create k3d clusters -$ ./create.sh +./create.sh # list the clusters -$ k3d cluster list +k3d cluster list NAME SERVERS AGENTS LOADBALANCER east 1/1 0/0 true west 1/1 0/0 true @@ -78,10 +78,10 @@ provided scripts, but feel free to have a look! ```sh # Install Linkerd and multicluster, output to check should be a success -$ ./install.sh +./install.sh # Next, link the two clusters together -$ ./link.sh +./link.sh ``` Perfect! If you've made it this far with no errors, then it's a good sign. In @@ -101,17 +101,17 @@ communication. 
First, we will deploy our pods and services: ```sh # deploy services and mesh namespaces -$ ./deploy.sh +./deploy.sh # verify both clusters # # verify east -$ kubectl --context=k3d-east get pods +kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 7s # verify west has headless service -$ kubectl --context=k3d-west get services +kubectl --context=k3d-west get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 10m nginx-svc ClusterIP None 80/TCP 8s @@ -119,7 +119,7 @@ nginx-svc ClusterIP None 80/TCP 8s # verify west has statefulset # # this may take a while to come up -$ kubectl --context=k3d-west get pods +kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 53s nginx-set-1 2/2 Running 0 43s @@ -130,7 +130,7 @@ Before we go further, let's have a look at the endpoints object for the `nginx-svc`: ```sh -$ kubectl --context=k3d-west get endpoints nginx-svc -o yaml +kubectl --context=k3d-west get endpoints nginx-svc -o yaml ... subsets: - addresses: @@ -170,23 +170,23 @@ would get an answer back. We can test this out by applying the curl pod to the `west` cluster: ```sh -$ kubectl --context=k3d-west apply -f east/curl.yml -$ kubectl --context=k3d-west get pods +kubectl --context=k3d-west apply -f east/curl.yml +kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 5m8s nginx-set-1 2/2 Running 0 4m58s nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s -$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh -/$ # prompt for curl pod +kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh +/# prompt for curl pod ``` If we now curl one of these instances, we will get back a response. 
```sh # exec'd on the pod -/ $ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local +/ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local " @@ -218,10 +218,10 @@ Now, let's do the same, but this time from the `east` cluster. We will first export the service. ```sh -$ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" +kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled -$ kubectl --context=k3d-east get services +kubectl --context=k3d-east get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 20h nginx-svc-west ClusterIP None 80/TCP 29s @@ -235,7 +235,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname will point to one of the services we see above: ```sh -$ kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml +kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml subsets: - addresses: - hostname: nginx-set-0 @@ -251,17 +251,17 @@ cluster (`west`), will be mirrored as a clusterIP service. We will see in a second why this matters. ```sh -$ kubectl --context=k3d-east get pods +kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -$ kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh +kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. # -/ $ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local +/ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local @@ -329,8 +329,8 @@ validation. 
To clean-up, you can remove both clusters entirely using the k3d CLI: ```sh -$ k3d cluster delete east +k3d cluster delete east cluster east deleted -$ k3d cluster delete west +k3d cluster delete west cluster west deleted ``` diff --git a/linkerd.io/content/2.13/tasks/restricting-access.md b/linkerd.io/content/2.13/tasks/restricting-access.md index 0b0b0c94b7..38ebdaeb3d 100644 --- a/linkerd.io/content/2.13/tasks/restricting-access.md +++ b/linkerd.io/content/2.13/tasks/restricting-access.md @@ -21,9 +21,9 @@ haven't already done this. Inject and install the Emojivoto application: ```bash -$ linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - +linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - ... -$ linkerd check -n emojivoto --proxy -o short +linkerd check -n emojivoto --proxy -o short ... ``` diff --git a/linkerd.io/content/2.13/tasks/securing-linkerd-tap.md b/linkerd.io/content/2.13/tasks/securing-linkerd-tap.md index 8a802c890c..639f81692f 100644 --- a/linkerd.io/content/2.13/tasks/securing-linkerd-tap.md +++ b/linkerd.io/content/2.13/tasks/securing-linkerd-tap.md @@ -60,7 +60,7 @@ kubectl auth can-i watch deployments.tap.linkerd.io -n emojivoto --as $(whoami) You can also use the Linkerd CLI's `--as` flag to confirm: ```bash -$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) Cannot connect to Linkerd Viz: namespaces is forbidden: User "XXXX" cannot list resource "namespaces" in API group "" at the cluster scope Validate the install with: linkerd viz check ... 
@@ -77,7 +77,7 @@ To enable tap access to all resources in all namespaces, you may bind your user to the `linkerd-linkerd-tap-admin` ClusterRole, installed by default: ```bash -$ kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin +kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin Name: linkerd-linkerd-viz-tap-admin Labels: component=tap linkerd.io/extension=viz @@ -109,7 +109,7 @@ kubectl create clusterrolebinding \ You can verify you now have tap access with: ```bash -$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) req id=3:0 proxy=in src=10.244.0.1:37392 dst=10.244.0.13:9996 tls=not_provided_by_remote :method=GET :authority=10.244.0.13:9996 :path=/ping ... ``` @@ -143,14 +143,14 @@ Because GCloud provides this additional level of access, there are cases where not. To validate this, check whether your GCloud user has Tap access: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces yes ``` And then validate whether your RBAC user has Tap access: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) no - no RBAC policy matched ``` @@ -187,14 +187,14 @@ privileges necessary to tap resources. 
To confirm: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web yes ``` This access is enabled via a `linkerd-linkerd-viz-web-admin` ClusterRoleBinding: ```bash -$ kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin +kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin Name: linkerd-linkerd-viz-web-admin Labels: component=web linkerd.io/extensions=viz @@ -227,6 +227,6 @@ kubectl delete clusterrolebindings/linkerd-linkerd-viz-web-admin To confirm: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web no ``` diff --git a/linkerd.io/content/2.13/tasks/troubleshooting.md b/linkerd.io/content/2.13/tasks/troubleshooting.md index 7ec6896a2d..8e7bf49e3c 100644 --- a/linkerd.io/content/2.13/tasks/troubleshooting.md +++ b/linkerd.io/content/2.13/tasks/troubleshooting.md @@ -230,7 +230,7 @@ Example failure: Ensure the Linkerd ClusterRoles exist: ```bash -$ kubectl get clusterroles | grep linkerd +kubectl get clusterroles | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -240,7 +240,7 @@ linkerd-policy 9d Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +kubectl auth can-i create clusterroles yes ``` @@ -257,7 +257,7 @@ Example failure: Ensure the Linkerd ClusterRoleBindings exist: ```bash -$ kubectl get clusterrolebindings | grep linkerd +kubectl get clusterrolebindings | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -267,7 +267,7 @@ linkerd-destination-policy 9d Also ensure you have permission to create ClusterRoleBindings: ```bash -$ 
kubectl auth can-i create clusterrolebindings +kubectl auth can-i create clusterrolebindings yes ``` @@ -284,7 +284,7 @@ Example failure: Ensure the Linkerd ServiceAccounts exist: ```bash -$ kubectl -n linkerd get serviceaccounts +kubectl -n linkerd get serviceaccounts NAME SECRETS AGE default 1 14m linkerd-destination 1 14m @@ -297,7 +297,7 @@ Also ensure you have permission to create ServiceAccounts in the Linkerd namespace: ```bash -$ kubectl -n linkerd auth can-i create serviceaccounts +kubectl -n linkerd auth can-i create serviceaccounts yes ``` @@ -314,7 +314,7 @@ Example failure: Ensure the Linkerd CRD exists: ```bash -$ kubectl get customresourcedefinitions +kubectl get customresourcedefinitions NAME CREATED AT serviceprofiles.linkerd.io 2019-04-25T21:47:31Z ``` @@ -322,7 +322,7 @@ serviceprofiles.linkerd.io 2019-04-25T21:47:31Z Also ensure you have permission to create CRDs: ```bash -$ kubectl auth can-i create customresourcedefinitions +kubectl auth can-i create customresourcedefinitions yes ``` @@ -339,14 +339,14 @@ Example failure: Ensure the Linkerd MutatingWebhookConfigurations exists: ```bash -$ kubectl get mutatingwebhookconfigurations | grep linkerd +kubectl get mutatingwebhookconfigurations | grep linkerd linkerd-proxy-injector-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create MutatingWebhookConfigurations: ```bash -$ kubectl auth can-i create mutatingwebhookconfigurations +kubectl auth can-i create mutatingwebhookconfigurations yes ``` @@ -363,14 +363,14 @@ Example failure: Ensure the Linkerd ValidatingWebhookConfiguration exists: ```bash -$ kubectl get validatingwebhookconfigurations | grep linkerd +kubectl get validatingwebhookconfigurations | grep linkerd linkerd-sp-validator-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create ValidatingWebhookConfigurations: ```bash -$ kubectl auth can-i create validatingwebhookconfigurations +kubectl auth can-i create 
validatingwebhookconfigurations yes ``` @@ -418,7 +418,7 @@ Example failure: Ensure the Linkerd ConfigMap exists: ```bash -$ kubectl -n linkerd get configmap/linkerd-config +kubectl -n linkerd get configmap/linkerd-config NAME DATA AGE linkerd-config 3 61m ``` @@ -426,7 +426,7 @@ linkerd-config 3 61m Also ensure you have permission to create ConfigMaps: ```bash -$ kubectl -n linkerd auth can-i create configmap +kubectl -n linkerd auth can-i create configmap yes ``` @@ -780,7 +780,7 @@ Example failure: Verify the state of the control plane pods with: ```bash -$ kubectl -n linkerd get po +kubectl -n linkerd get po NAME READY STATUS RESTARTS AGE linkerd-destination-5fd7b5d466-szgqm 2/2 Running 1 12m linkerd-identity-54df78c479-hbh5m 2/2 Running 0 12m @@ -862,7 +862,7 @@ Ensure you can connect to the Linkerd version check endpoint from the environment the `linkerd` cli is running: ```bash -$ curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" +curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" {"stable":"stable-2.1.0","edge":"edge-19.1.2"} ``` @@ -921,7 +921,7 @@ normally. Example failure: ```bash -$ linkerd check --proxy --namespace foo +linkerd check --proxy --namespace foo ... 
× data plane namespace exists The "foo" namespace does not exist @@ -1045,7 +1045,7 @@ Ensure the kube-system namespace has the `config.linkerd.io/admission-webhooks:disabled` label: ```bash -$ kubectl get namespace kube-system -oyaml +kubectl get namespace kube-system -oyaml kind: Namespace apiVersion: v1 metadata: @@ -1118,7 +1118,7 @@ Example error: Ensure that the linkerd-cni-config ConfigMap exists in the CNI namespace: ```bash -$ kubectl get cm linkerd-cni-config -n linkerd-cni +kubectl get cm linkerd-cni-config -n linkerd-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1126,7 +1126,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create ConfigMaps: ```bash -$ kubectl auth can-i create ConfigMaps +kubectl auth can-i create ConfigMaps yes ``` @@ -1143,7 +1143,7 @@ Example error: Ensure that the cluster role exists: ```bash -$ kubectl get clusterrole linkerd-cni +kubectl get clusterrole linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1151,7 +1151,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create ClusterRoles +kubectl auth can-i create ClusterRoles yes ``` @@ -1168,7 +1168,7 @@ Example error: Ensure that the cluster role binding exists: ```bash -$ kubectl get clusterrolebinding linkerd-cni +kubectl get clusterrolebinding linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1176,7 +1176,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create ClusterRoleBindings +kubectl auth can-i create ClusterRoleBindings yes ``` @@ -1193,7 +1193,7 @@ Example error: Ensure that the CNI service account exists in the CNI namespace: ```bash -$ kubectl get ServiceAccount linkerd-cni -n linkerd-cni +kubectl get ServiceAccount linkerd-cni -n linkerd-cni NAME SECRETS AGE linkerd-cni 1 45m ``` 
@@ -1201,7 +1201,7 @@ linkerd-cni 1 45m Also ensure you have permission to create ServiceAccount: ```bash -$ kubectl auth can-i create ServiceAccounts -n linkerd-cni +kubectl auth can-i create ServiceAccounts -n linkerd-cni yes ``` @@ -1218,7 +1218,7 @@ Example error: Ensure that the CNI daemonset exists in the CNI namespace: ```bash -$ kubectl get ds -n linkerd-cni +kubectl get ds -n linkerd-cni NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE linkerd-cni 1 1 1 1 1 beta.kubernetes.io/os=linux 14m ``` @@ -1226,7 +1226,7 @@ linkerd-cni 1 1 1 1 1 beta.kubernet Also ensure you have permission to create DaemonSets: ```bash -$ kubectl auth can-i create DaemonSets -n linkerd-cni +kubectl auth can-i create DaemonSets -n linkerd-cni yes ``` @@ -1243,7 +1243,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -$ kubectl get po -n linkerd-cn +kubectl get po -n linkerd-cn NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s @@ -1253,7 +1253,7 @@ linkerd-cni-p5670 1/1 Running 0 9m25s Ensure that all pods have finished the deployment of the CNI config and binary: ```bash -$ kubectl logs linkerd-cni-rzp2q -n linkerd-cni +kubectl logs linkerd-cni-rzp2q -n linkerd-cni Wrote linkerd CNI binaries to /host/opt/cni/bin Created CNI config /host/etc/cni/net.d/10-kindnet.conflist Done configuring CNI. Sleep=true @@ -1281,7 +1281,7 @@ Make sure multicluster extension is correctly installed and that the `links.multicluster.linkerd.io` CRD is present. ```bash -$ kubectl get crds | grep multicluster +kubectl get crds | grep multicluster NAME CREATED AT links.multicluster.linkerd.io 2021-03-10T09:58:10Z ``` @@ -1360,7 +1360,7 @@ the rules section. 
Expected rules for `linkerd-service-mirror-access-local-resources` cluster role: ```bash -$ kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml +kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml kind: ClusterRole metadata: labels: @@ -1393,7 +1393,7 @@ rules: Expected rules for `linkerd-service-mirror-read-remote-creds` role: ```bash -$ kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml +kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml kind: Role metadata: labels: @@ -1426,7 +1426,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the controller pod with: ```bash -$ kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror +kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror NAME READY STATUS RESTARTS AGE linkerd-service-mirror-7bb8ff5967-zg265 2/2 Running 0 50m ``` @@ -1544,7 +1544,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoles exist: ```bash -$ kubectl get clusterroles | grep linkerd-viz +kubectl get clusterroles | grep linkerd-viz linkerd-linkerd-viz-metrics-api 2021-01-26T18:02:17Z linkerd-linkerd-viz-prometheus 2021-01-26T18:02:17Z linkerd-linkerd-viz-tap 2021-01-26T18:02:17Z @@ -1555,7 +1555,7 @@ linkerd-linkerd-viz-web-check 2021-01-2 Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +kubectl auth can-i create clusterroles yes ``` @@ -1572,7 +1572,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoleBindings exist: ```bash -$ kubectl get clusterrolebindings | grep linkerd-viz +kubectl get clusterrolebindings | grep linkerd-viz linkerd-linkerd-viz-metrics-api ClusterRole/linkerd-linkerd-viz-metrics-api 18h linkerd-linkerd-viz-prometheus 
ClusterRole/linkerd-linkerd-viz-prometheus 18h linkerd-linkerd-viz-tap ClusterRole/linkerd-linkerd-viz-tap 18h @@ -1584,7 +1584,7 @@ linkerd-linkerd-viz-web-check ClusterRole/linkerd-linke Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create clusterrolebindings +kubectl auth can-i create clusterrolebindings yes ``` @@ -1673,7 +1673,7 @@ requirements in the cluster: Ensure all the linkerd-viz pods are injected ```bash -$ kubectl -n linkerd-viz get pods +kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1697,7 +1697,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-viz pods are running with 2/2 ```bash -$ kubectl -n linkerd-viz get pods +kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1880,7 +1880,7 @@ versions in sync by updating either the CLI or linkerd-jaeger as necessary. 
Ensure all the jaeger pods are injected ```bash -$ kubectl -n linkerd-jaeger get pods +kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE collector-69cc44dfbc-rhpfg 2/2 Running 0 11s jaeger-6f98d5c979-scqlq 2/2 Running 0 11s @@ -1901,7 +1901,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-jaeger pods are running with 2/2 ```bash -$ kubectl -n linkerd-jaeger get pods +kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE jaeger-injector-548684d74b-bcq5h 2/2 Running 0 5s collector-69cc44dfbc-wqf6s 2/2 Running 0 5s @@ -1950,7 +1950,7 @@ Ensure you can connect to the Linkerd Buoyant version check endpoint from the environment the `linkerd` cli is running: ```bash -$ curl https://buoyant.cloud/version.json +curl https://buoyant.cloud/version.json {"linkerd-buoyant":"v0.4.4"} ``` @@ -2015,7 +2015,7 @@ linkerd-buoyant install | kubectl apply -f - Ensure that the cluster role exists: ```bash -$ kubectl get clusterrole buoyant-cloud-agent +kubectl get clusterrole buoyant-cloud-agent NAME CREATED AT buoyant-cloud-agent 2020-11-13T00:59:50Z ``` @@ -2023,7 +2023,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create ClusterRoles +kubectl auth can-i create ClusterRoles yes ``` @@ -2038,7 +2038,7 @@ yes Ensure that the cluster role binding exists: ```bash -$ kubectl get clusterrolebinding buoyant-cloud-agent +kubectl get clusterrolebinding buoyant-cloud-agent NAME ROLE AGE buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d ``` @@ -2046,7 +2046,7 @@ buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create ClusterRoleBindings +kubectl auth can-i create ClusterRoleBindings yes ``` @@ -2061,7 +2061,7 @@ yes Ensure that the service account exists: ```bash -$ kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent 
+kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent NAME SECRETS AGE buoyant-cloud-agent 1 301d ``` @@ -2069,7 +2069,7 @@ buoyant-cloud-agent 1 301d Also ensure you have permission to create ServiceAccounts: ```bash -$ kubectl -n buoyant-cloud auth can-i create ServiceAccount +kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2084,7 +2084,7 @@ yes Ensure that the secret exists: ```bash -$ kubectl -n buoyant-cloud get secret buoyant-cloud-id +kubectl -n buoyant-cloud get secret buoyant-cloud-id NAME TYPE DATA AGE buoyant-cloud-id Opaque 4 301d ``` @@ -2092,7 +2092,7 @@ buoyant-cloud-id Opaque 4 301d Also ensure you have permission to create ServiceAccounts: ```bash -$ kubectl -n buoyant-cloud auth can-i create ServiceAccount +kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2130,7 +2130,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-agent` Deployment with: ```bash -$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent +kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 156m ``` @@ -2153,7 +2153,7 @@ Ensure the `buoyant-cloud-agent` pod is injected, the `READY` column should show `2/2`: ```bash -$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent +kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 161m ``` @@ -2172,7 +2172,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -$ linkerd-buoyant version +linkerd-buoyant version CLI version: v0.4.4 Agent version: v0.4.4 ``` @@ -2231,7 +2231,7 @@ everything to start up. 
If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-metrics` DaemonSet with: ```bash -$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics +kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 163m buoyant-cloud-metrics-q8jhj 2/2 Running 0 163m @@ -2257,7 +2257,7 @@ Ensure the `buoyant-cloud-metrics` pods are injected, the `READY` column should show `2/2`: ```bash -$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics +kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 166m buoyant-cloud-metrics-q8jhj 2/2 Running 0 166m @@ -2279,7 +2279,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' +kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' {"app.kubernetes.io/name":"metrics","app.kubernetes.io/part-of":"buoyant-cloud","app.kubernetes.io/version":"v0.4.4"} ``` diff --git a/linkerd.io/content/2.13/tasks/upgrade.md b/linkerd.io/content/2.13/tasks/upgrade.md index 08c3e70a35..3c00e0871b 100644 --- a/linkerd.io/content/2.13/tasks/upgrade.md +++ b/linkerd.io/content/2.13/tasks/upgrade.md @@ -303,7 +303,7 @@ Find the release name you used for the `linkerd2` chart, and the namespace where this release stored its config: ```bash -$ helm ls -A +helm ls -A NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION linkerd default 1 2021-11-22 17:14:50.751436374 -0500 -05 deployed linkerd2-2.11.1 stable-2.11.1 ``` @@ -336,18 +336,18 @@ the `linkerd-crds`, `linkerd-control-plane` and `linkerd-smi` charts: ```bash # First migrate the CRDs -$ helm -n default get manifest linkerd | \ +helm -n default get manifest linkerd | \ yq 'select(.kind == 
"CustomResourceDefinition") | .metadata.name' | \ grep -v '\-\-\-' | \ xargs -n1 sh -c \ 'kubectl annotate --overwrite crd/$0 meta.helm.sh/release-name=linkerd-crds meta.helm.sh/release-namespace=linkerd' # Special case for TrafficSplit (only use if you have TrafficSplit CRs) -$ kubectl annotate --overwrite crd/trafficsplits.split.smi-spec.io \ +kubectl annotate --overwrite crd/trafficsplits.split.smi-spec.io \ meta.helm.sh/release-name=linkerd-smi meta.helm.sh/release-namespace=linkerd-smi # Now migrate all the other resources -$ helm -n default get manifest linkerd | \ +helm -n default get manifest linkerd | \ yq 'select(.kind != "CustomResourceDefinition")' | \ yq '.kind, .metadata.name, .metadata.namespace' | \ grep -v '\-\-\-' | @@ -361,14 +361,14 @@ above. ```bash # First make sure you update the helm repo -$ helm repo up +helm repo up # Install the linkerd-crds chart -$ helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds +helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds # Install the linkerd-control-plane chart # (remember to add any customizations you retrieved above) -$ helm install linkerd-control-plane \ +helm install linkerd-control-plane \ -n linkerd \ --set-file identityTrustAnchorsPEM=ca.crt \ --set-file identity.issuer.tls.crtPEM=issuer.crt \ @@ -376,8 +376,8 @@ $ helm install linkerd-control-plane \ linkerd/linkerd-control-plane # Optional: if using TrafficSplit CRs -$ helm repo add l5d-smi https://linkerd.github.io/linkerd-smi -$ helm install linkerd-smi -n linkerd-smi --create-namespace l5d-smi/linkerd-smi +helm repo add l5d-smi https://linkerd.github.io/linkerd-smi +helm install linkerd-smi -n linkerd-smi --create-namespace l5d-smi/linkerd-smi ``` ##### Cleaning up the old linkerd2 Helm release @@ -388,7 +388,7 @@ remove the Helm release config for the old `linkerd2` chart (assuming you used the "Secret" storage backend, which is the default): ```bash -$ kubectl -n default delete secret \ 
+kubectl -n default delete secret \ --field-selector type=helm.sh/release.v1 \ -l name=linkerd,owner=helm ``` diff --git a/linkerd.io/content/2.14/reference/cli/check.md b/linkerd.io/content/2.14/reference/cli/check.md index 7cd61cd237..67a2486908 100644 --- a/linkerd.io/content/2.14/reference/cli/check.md +++ b/linkerd.io/content/2.14/reference/cli/check.md @@ -12,7 +12,7 @@ for a full list of all the possible checks, what they do and how to fix them. ## Example output ```bash -$ linkerd check +linkerd check kubernetes-api -------------- √ can initialize the client diff --git a/linkerd.io/content/2.14/reference/iptables.md b/linkerd.io/content/2.14/reference/iptables.md index 67a7ea89de..9b4d229a59 100644 --- a/linkerd.io/content/2.14/reference/iptables.md +++ b/linkerd.io/content/2.14/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -$ kubectl -n logs linkerd-init +kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2.14/tasks/configuring-dynamic-request-routing.md b/linkerd.io/content/2.14/tasks/configuring-dynamic-request-routing.md index c38f2a438c..e554bc6ac4 100644 --- a/linkerd.io/content/2.14/tasks/configuring-dynamic-request-routing.md +++ b/linkerd.io/content/2.14/tasks/configuring-dynamic-request-routing.md @@ -67,7 +67,7 @@ Requests to `/echo` on port 9898 to the frontend pod will get forwarded the pod pointed by the Service `backend-a-podinfo`: ```bash -$ curl -sX POST localhost:9898/echo \ +curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' PODINFO_UI_MESSAGE=A backend @@ -161,7 +161,7 @@ to the `backend-a-podinfo` Service. The previous requests should still reach `backend-a-podinfo` only: ```bash -$ curl -sX POST localhost:9898/echo \ +curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. 
backend' PODINFO_UI_MESSAGE=A backend @@ -171,7 +171,7 @@ But if we add the "`x-request-id: alternative`" header they get routed to `backend-b-podinfo`: ```bash -$ curl -sX POST \ +curl -sX POST \ -H 'x-request-id: alternative' \ localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' diff --git a/linkerd.io/content/2.14/tasks/configuring-per-route-policy.md b/linkerd.io/content/2.14/tasks/configuring-per-route-policy.md index a5c8b5c2ef..63b79fc6d4 100644 --- a/linkerd.io/content/2.14/tasks/configuring-per-route-policy.md +++ b/linkerd.io/content/2.14/tasks/configuring-per-route-policy.md @@ -30,7 +30,7 @@ haven't already done this. Inject and install the Books demo application: ```bash -$ kubectl create ns booksapp && \ +kubectl create ns booksapp && \ curl --proto '=https' --tlsv1.2 -sSfL https://run.linkerd.io/booksapp.yml \ | linkerd inject - \ | kubectl -n booksapp apply -f - @@ -44,21 +44,21 @@ run in the `booksapp` namespace. Confirm that the Linkerd data plane was injected successfully: ```bash -$ linkerd check -n booksapp --proxy -o short +linkerd check -n booksapp --proxy -o short ``` You can take a quick look at all the components that were added to your cluster by running: ```bash -$ kubectl -n booksapp get all +kubectl -n booksapp get all ``` Once the rollout has completed successfully, you can access the app itself by port-forwarding `webapp` locally: ```bash -$ kubectl -n booksapp port-forward svc/webapp 7000 & +kubectl -n booksapp port-forward svc/webapp 7000 & ``` Open [http://localhost:7000/](http://localhost:7000/) in your browser to see the @@ -87,7 +87,7 @@ First, let's run the `linkerd viz authz` command to list the authorization resources that currently exist for the `authors` deployment: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated 
default/all-unauthenticated 0.0rps 70.31% 8.1rps 1ms 43ms 49ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.3rps 1ms 1ms 1ms @@ -124,7 +124,7 @@ Now that we've defined a [`Server`] for the authors `Deployment`, we can run the currently unauthorized: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default authors-server 9.5rps 0.00% 0.0rps 0ms 0ms 0ms probe authors-server default/probe 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -312,7 +312,7 @@ network (0.0.0.0). Running `linkerd viz authz` again, we can now see that our new policies exist: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 2ms 2ms 2ms authors-probe-route authors-server authorizationpolicy/authors-probe-policy 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -383,7 +383,7 @@ requests, but we haven't _authorized_ requests to that route. 
Running the requests to `authors-modify-route`: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy - - - - - - authors-modify-route authors-server 9.7rps 0.00% 0.0rps 0ms 0ms 0ms @@ -442,7 +442,7 @@ Running the `linkerd viz authz` command one last time, we now see that all traffic is authorized: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 0ms 0ms 0ms authors-modify-route authors-server authorizationpolicy/authors-modify-policy 0.0rps 100.00% 0.0rps 0ms 0ms 0ms diff --git a/linkerd.io/content/2.14/tasks/getting-per-route-metrics.md b/linkerd.io/content/2.14/tasks/getting-per-route-metrics.md index 34ee2bff6a..c2db8c0965 100644 --- a/linkerd.io/content/2.14/tasks/getting-per-route-metrics.md +++ b/linkerd.io/content/2.14/tasks/getting-per-route-metrics.md @@ -24,7 +24,7 @@ per-route authorization. You can view per-route metrics in the CLI by running `linkerd viz routes`: ```bash -$ linkerd viz routes svc/webapp +linkerd viz routes svc/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 GET / webapp 100.00% 0.6rps 25ms 30ms 30ms GET /authors/{id} webapp 100.00% 0.6rps 22ms 29ms 30ms @@ -44,7 +44,7 @@ specified in your service profile will end up there. 
It is also possible to look the metrics up by other resource types, such as: ```bash -$ linkerd viz routes deploy/webapp +linkerd viz routes deploy/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 [DEFAULT] kubernetes 0.00% 0.0rps 0ms 0ms 0ms GET / webapp 100.00% 0.5rps 27ms 38ms 40ms @@ -63,7 +63,7 @@ Then, it is possible to filter all the way down to requests going from a specific resource to other services: ```bash -$ linkerd viz routes deploy/webapp --to svc/books +linkerd viz routes deploy/webapp --to svc/books ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 DELETE /books/{id}.json books 100.00% 0.5rps 18ms 29ms 30ms GET /books.json books 100.00% 1.1rps 7ms 12ms 18ms diff --git a/linkerd.io/content/2.14/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.14/tasks/multicluster-using-statefulsets.md index 9d8730b5b0..c720c09563 100644 --- a/linkerd.io/content/2.14/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.14/tasks/multicluster-using-statefulsets.md @@ -48,8 +48,8 @@ The first step is to clone the demo repository on your local machine. ```sh # clone example repository -$ git clone git@github.com:mateiidavid/l2d-k3d-statefulset.git -$ cd l2d-k3d-statefulset +git clone git@github.com:mateiidavid/l2d-k3d-statefulset.git +cd l2d-k3d-statefulset ``` The second step consists of creating two `k3d` clusters named `east` and `west`, @@ -60,10 +60,10 @@ everything. ```sh # create k3d clusters -$ ./create.sh +./create.sh # list the clusters -$ k3d cluster list +k3d cluster list NAME SERVERS AGENTS LOADBALANCER east 1/1 0/0 true west 1/1 0/0 true @@ -78,10 +78,10 @@ provided scripts, but feel free to have a look! ```sh # Install Linkerd and multicluster, output to check should be a success -$ ./install.sh +./install.sh # Next, link the two clusters together -$ ./link.sh +./link.sh ``` Perfect! If you've made it this far with no errors, then it's a good sign. In @@ -101,17 +101,17 @@ communication. 
First, we will deploy our pods and services: ```sh # deploy services and mesh namespaces -$ ./deploy.sh +./deploy.sh # verify both clusters # # verify east -$ kubectl --context=k3d-east get pods +kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 7s # verify west has headless service -$ kubectl --context=k3d-west get services +kubectl --context=k3d-west get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 10m nginx-svc ClusterIP None 80/TCP 8s @@ -119,7 +119,7 @@ nginx-svc ClusterIP None 80/TCP 8s # verify west has statefulset # # this may take a while to come up -$ kubectl --context=k3d-west get pods +kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 53s nginx-set-1 2/2 Running 0 43s @@ -130,7 +130,7 @@ Before we go further, let's have a look at the endpoints object for the `nginx-svc`: ```sh -$ kubectl --context=k3d-west get endpoints nginx-svc -o yaml +kubectl --context=k3d-west get endpoints nginx-svc -o yaml ... subsets: - addresses: @@ -170,23 +170,23 @@ would get an answer back. We can test this out by applying the curl pod to the `west` cluster: ```sh -$ kubectl --context=k3d-west apply -f east/curl.yml -$ kubectl --context=k3d-west get pods +kubectl --context=k3d-west apply -f east/curl.yml +kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 5m8s nginx-set-1 2/2 Running 0 4m58s nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s -$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh -/$ # prompt for curl pod +kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh +# prompt for curl pod ``` If we now curl one of these instances, we will get back a response.
```sh # exec'd on the pod -/ $ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local +curl nginx-set-0.nginx-svc.default.svc.west.cluster.local " @@ -218,10 +218,10 @@ Now, let's do the same, but this time from the `east` cluster. We will first export the service. ```sh -$ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" +kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled -$ kubectl --context=k3d-east get services +kubectl --context=k3d-east get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 20h nginx-svc-west ClusterIP None 80/TCP 29s @@ -235,7 +235,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname will point to one of the services we see above: ```sh -$ kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml +kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml subsets: - addresses: - hostname: nginx-set-0 @@ -251,17 +251,17 @@ cluster (`west`), will be mirrored as a clusterIP service. We will see in a second why this matters. ```sh -$ kubectl --context=k3d-east get pods +kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -$ kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh +kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. # -/ $ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local +curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local @@ -329,8 +329,8 @@ validation.
To clean-up, you can remove both clusters entirely using the k3d CLI: ```sh -$ k3d cluster delete east +k3d cluster delete east cluster east deleted -$ k3d cluster delete west +k3d cluster delete west cluster west deleted ``` diff --git a/linkerd.io/content/2.14/tasks/restricting-access.md b/linkerd.io/content/2.14/tasks/restricting-access.md index 0b0b0c94b7..38ebdaeb3d 100644 --- a/linkerd.io/content/2.14/tasks/restricting-access.md +++ b/linkerd.io/content/2.14/tasks/restricting-access.md @@ -21,9 +21,9 @@ haven't already done this. Inject and install the Emojivoto application: ```bash -$ linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - +linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - ... -$ linkerd check -n emojivoto --proxy -o short +linkerd check -n emojivoto --proxy -o short ... ``` diff --git a/linkerd.io/content/2.14/tasks/securing-linkerd-tap.md b/linkerd.io/content/2.14/tasks/securing-linkerd-tap.md index 8a802c890c..639f81692f 100644 --- a/linkerd.io/content/2.14/tasks/securing-linkerd-tap.md +++ b/linkerd.io/content/2.14/tasks/securing-linkerd-tap.md @@ -60,7 +60,7 @@ kubectl auth can-i watch deployments.tap.linkerd.io -n emojivoto --as $(whoami) You can also use the Linkerd CLI's `--as` flag to confirm: ```bash -$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) Cannot connect to Linkerd Viz: namespaces is forbidden: User "XXXX" cannot list resource "namespaces" in API group "" at the cluster scope Validate the install with: linkerd viz check ... 
@@ -77,7 +77,7 @@ To enable tap access to all resources in all namespaces, you may bind your user to the `linkerd-linkerd-tap-admin` ClusterRole, installed by default: ```bash -$ kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin +kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin Name: linkerd-linkerd-viz-tap-admin Labels: component=tap linkerd.io/extension=viz @@ -109,7 +109,7 @@ kubectl create clusterrolebinding \ You can verify you now have tap access with: ```bash -$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) req id=3:0 proxy=in src=10.244.0.1:37392 dst=10.244.0.13:9996 tls=not_provided_by_remote :method=GET :authority=10.244.0.13:9996 :path=/ping ... ``` @@ -143,14 +143,14 @@ Because GCloud provides this additional level of access, there are cases where not. To validate this, check whether your GCloud user has Tap access: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces yes ``` And then validate whether your RBAC user has Tap access: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) no - no RBAC policy matched ``` @@ -187,14 +187,14 @@ privileges necessary to tap resources. 
To confirm: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web yes ``` This access is enabled via a `linkerd-linkerd-viz-web-admin` ClusterRoleBinding: ```bash -$ kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin +kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin Name: linkerd-linkerd-viz-web-admin Labels: component=web linkerd.io/extensions=viz @@ -227,6 +227,6 @@ kubectl delete clusterrolebindings/linkerd-linkerd-viz-web-admin To confirm: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web no ``` diff --git a/linkerd.io/content/2.14/tasks/troubleshooting.md b/linkerd.io/content/2.14/tasks/troubleshooting.md index 7ec6896a2d..8e7bf49e3c 100644 --- a/linkerd.io/content/2.14/tasks/troubleshooting.md +++ b/linkerd.io/content/2.14/tasks/troubleshooting.md @@ -230,7 +230,7 @@ Example failure: Ensure the Linkerd ClusterRoles exist: ```bash -$ kubectl get clusterroles | grep linkerd +kubectl get clusterroles | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -240,7 +240,7 @@ linkerd-policy 9d Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +kubectl auth can-i create clusterroles yes ``` @@ -257,7 +257,7 @@ Example failure: Ensure the Linkerd ClusterRoleBindings exist: ```bash -$ kubectl get clusterrolebindings | grep linkerd +kubectl get clusterrolebindings | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -267,7 +267,7 @@ linkerd-destination-policy 9d Also ensure you have permission to create ClusterRoleBindings: ```bash -$ 
kubectl auth can-i create clusterrolebindings +kubectl auth can-i create clusterrolebindings yes ``` @@ -284,7 +284,7 @@ Example failure: Ensure the Linkerd ServiceAccounts exist: ```bash -$ kubectl -n linkerd get serviceaccounts +kubectl -n linkerd get serviceaccounts NAME SECRETS AGE default 1 14m linkerd-destination 1 14m @@ -297,7 +297,7 @@ Also ensure you have permission to create ServiceAccounts in the Linkerd namespace: ```bash -$ kubectl -n linkerd auth can-i create serviceaccounts +kubectl -n linkerd auth can-i create serviceaccounts yes ``` @@ -314,7 +314,7 @@ Example failure: Ensure the Linkerd CRD exists: ```bash -$ kubectl get customresourcedefinitions +kubectl get customresourcedefinitions NAME CREATED AT serviceprofiles.linkerd.io 2019-04-25T21:47:31Z ``` @@ -322,7 +322,7 @@ serviceprofiles.linkerd.io 2019-04-25T21:47:31Z Also ensure you have permission to create CRDs: ```bash -$ kubectl auth can-i create customresourcedefinitions +kubectl auth can-i create customresourcedefinitions yes ``` @@ -339,14 +339,14 @@ Example failure: Ensure the Linkerd MutatingWebhookConfigurations exists: ```bash -$ kubectl get mutatingwebhookconfigurations | grep linkerd +kubectl get mutatingwebhookconfigurations | grep linkerd linkerd-proxy-injector-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create MutatingWebhookConfigurations: ```bash -$ kubectl auth can-i create mutatingwebhookconfigurations +kubectl auth can-i create mutatingwebhookconfigurations yes ``` @@ -363,14 +363,14 @@ Example failure: Ensure the Linkerd ValidatingWebhookConfiguration exists: ```bash -$ kubectl get validatingwebhookconfigurations | grep linkerd +kubectl get validatingwebhookconfigurations | grep linkerd linkerd-sp-validator-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create ValidatingWebhookConfigurations: ```bash -$ kubectl auth can-i create validatingwebhookconfigurations +kubectl auth can-i create 
validatingwebhookconfigurations yes ``` @@ -418,7 +418,7 @@ Example failure: Ensure the Linkerd ConfigMap exists: ```bash -$ kubectl -n linkerd get configmap/linkerd-config +kubectl -n linkerd get configmap/linkerd-config NAME DATA AGE linkerd-config 3 61m ``` @@ -426,7 +426,7 @@ linkerd-config 3 61m Also ensure you have permission to create ConfigMaps: ```bash -$ kubectl -n linkerd auth can-i create configmap +kubectl -n linkerd auth can-i create configmap yes ``` @@ -780,7 +780,7 @@ Example failure: Verify the state of the control plane pods with: ```bash -$ kubectl -n linkerd get po +kubectl -n linkerd get po NAME READY STATUS RESTARTS AGE linkerd-destination-5fd7b5d466-szgqm 2/2 Running 1 12m linkerd-identity-54df78c479-hbh5m 2/2 Running 0 12m @@ -862,7 +862,7 @@ Ensure you can connect to the Linkerd version check endpoint from the environment the `linkerd` cli is running: ```bash -$ curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" +curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" {"stable":"stable-2.1.0","edge":"edge-19.1.2"} ``` @@ -921,7 +921,7 @@ normally. Example failure: ```bash -$ linkerd check --proxy --namespace foo +linkerd check --proxy --namespace foo ... 
× data plane namespace exists The "foo" namespace does not exist @@ -1045,7 +1045,7 @@ Ensure the kube-system namespace has the `config.linkerd.io/admission-webhooks:disabled` label: ```bash -$ kubectl get namespace kube-system -oyaml +kubectl get namespace kube-system -oyaml kind: Namespace apiVersion: v1 metadata: @@ -1118,7 +1118,7 @@ Example error: Ensure that the linkerd-cni-config ConfigMap exists in the CNI namespace: ```bash -$ kubectl get cm linkerd-cni-config -n linkerd-cni +kubectl get cm linkerd-cni-config -n linkerd-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1126,7 +1126,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create ConfigMaps: ```bash -$ kubectl auth can-i create ConfigMaps +kubectl auth can-i create ConfigMaps yes ``` @@ -1143,7 +1143,7 @@ Example error: Ensure that the cluster role exists: ```bash -$ kubectl get clusterrole linkerd-cni +kubectl get clusterrole linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1151,7 +1151,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create ClusterRoles +kubectl auth can-i create ClusterRoles yes ``` @@ -1168,7 +1168,7 @@ Example error: Ensure that the cluster role binding exists: ```bash -$ kubectl get clusterrolebinding linkerd-cni +kubectl get clusterrolebinding linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1176,7 +1176,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create ClusterRoleBindings +kubectl auth can-i create ClusterRoleBindings yes ``` @@ -1193,7 +1193,7 @@ Example error: Ensure that the CNI service account exists in the CNI namespace: ```bash -$ kubectl get ServiceAccount linkerd-cni -n linkerd-cni +kubectl get ServiceAccount linkerd-cni -n linkerd-cni NAME SECRETS AGE linkerd-cni 1 45m ``` 
@@ -1201,7 +1201,7 @@ linkerd-cni 1 45m Also ensure you have permission to create ServiceAccount: ```bash -$ kubectl auth can-i create ServiceAccounts -n linkerd-cni +kubectl auth can-i create ServiceAccounts -n linkerd-cni yes ``` @@ -1218,7 +1218,7 @@ Example error: Ensure that the CNI daemonset exists in the CNI namespace: ```bash -$ kubectl get ds -n linkerd-cni +kubectl get ds -n linkerd-cni NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE linkerd-cni 1 1 1 1 1 beta.kubernetes.io/os=linux 14m ``` @@ -1226,7 +1226,7 @@ linkerd-cni 1 1 1 1 1 beta.kubernet Also ensure you have permission to create DaemonSets: ```bash -$ kubectl auth can-i create DaemonSets -n linkerd-cni +kubectl auth can-i create DaemonSets -n linkerd-cni yes ``` @@ -1243,7 +1243,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -$ kubectl get po -n linkerd-cn +kubectl get po -n linkerd-cn NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s @@ -1253,7 +1253,7 @@ linkerd-cni-p5670 1/1 Running 0 9m25s Ensure that all pods have finished the deployment of the CNI config and binary: ```bash -$ kubectl logs linkerd-cni-rzp2q -n linkerd-cni +kubectl logs linkerd-cni-rzp2q -n linkerd-cni Wrote linkerd CNI binaries to /host/opt/cni/bin Created CNI config /host/etc/cni/net.d/10-kindnet.conflist Done configuring CNI. Sleep=true @@ -1281,7 +1281,7 @@ Make sure multicluster extension is correctly installed and that the `links.multicluster.linkerd.io` CRD is present. ```bash -$ kubectl get crds | grep multicluster +kubectl get crds | grep multicluster NAME CREATED AT links.multicluster.linkerd.io 2021-03-10T09:58:10Z ``` @@ -1360,7 +1360,7 @@ the rules section. 
Expected rules for `linkerd-service-mirror-access-local-resources` cluster role: ```bash -$ kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml +kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml kind: ClusterRole metadata: labels: @@ -1393,7 +1393,7 @@ rules: Expected rules for `linkerd-service-mirror-read-remote-creds` role: ```bash -$ kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml +kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml kind: Role metadata: labels: @@ -1426,7 +1426,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the controller pod with: ```bash -$ kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror +kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror NAME READY STATUS RESTARTS AGE linkerd-service-mirror-7bb8ff5967-zg265 2/2 Running 0 50m ``` @@ -1544,7 +1544,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoles exist: ```bash -$ kubectl get clusterroles | grep linkerd-viz +kubectl get clusterroles | grep linkerd-viz linkerd-linkerd-viz-metrics-api 2021-01-26T18:02:17Z linkerd-linkerd-viz-prometheus 2021-01-26T18:02:17Z linkerd-linkerd-viz-tap 2021-01-26T18:02:17Z @@ -1555,7 +1555,7 @@ linkerd-linkerd-viz-web-check 2021-01-2 Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +kubectl auth can-i create clusterroles yes ``` @@ -1572,7 +1572,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoleBindings exist: ```bash -$ kubectl get clusterrolebindings | grep linkerd-viz +kubectl get clusterrolebindings | grep linkerd-viz linkerd-linkerd-viz-metrics-api ClusterRole/linkerd-linkerd-viz-metrics-api 18h linkerd-linkerd-viz-prometheus 
ClusterRole/linkerd-linkerd-viz-prometheus 18h linkerd-linkerd-viz-tap ClusterRole/linkerd-linkerd-viz-tap 18h @@ -1584,7 +1584,7 @@ linkerd-linkerd-viz-web-check ClusterRole/linkerd-linke Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create clusterrolebindings +kubectl auth can-i create clusterrolebindings yes ``` @@ -1673,7 +1673,7 @@ requirements in the cluster: Ensure all the linkerd-viz pods are injected ```bash -$ kubectl -n linkerd-viz get pods +kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1697,7 +1697,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-viz pods are running with 2/2 ```bash -$ kubectl -n linkerd-viz get pods +kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1880,7 +1880,7 @@ versions in sync by updating either the CLI or linkerd-jaeger as necessary. 
Ensure all the jaeger pods are injected ```bash -$ kubectl -n linkerd-jaeger get pods +kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE collector-69cc44dfbc-rhpfg 2/2 Running 0 11s jaeger-6f98d5c979-scqlq 2/2 Running 0 11s @@ -1901,7 +1901,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-jaeger pods are running with 2/2 ```bash -$ kubectl -n linkerd-jaeger get pods +kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE jaeger-injector-548684d74b-bcq5h 2/2 Running 0 5s collector-69cc44dfbc-wqf6s 2/2 Running 0 5s @@ -1950,7 +1950,7 @@ Ensure you can connect to the Linkerd Buoyant version check endpoint from the environment the `linkerd` cli is running: ```bash -$ curl https://buoyant.cloud/version.json +curl https://buoyant.cloud/version.json {"linkerd-buoyant":"v0.4.4"} ``` @@ -2015,7 +2015,7 @@ linkerd-buoyant install | kubectl apply -f - Ensure that the cluster role exists: ```bash -$ kubectl get clusterrole buoyant-cloud-agent +kubectl get clusterrole buoyant-cloud-agent NAME CREATED AT buoyant-cloud-agent 2020-11-13T00:59:50Z ``` @@ -2023,7 +2023,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create ClusterRoles +kubectl auth can-i create ClusterRoles yes ``` @@ -2038,7 +2038,7 @@ yes Ensure that the cluster role binding exists: ```bash -$ kubectl get clusterrolebinding buoyant-cloud-agent +kubectl get clusterrolebinding buoyant-cloud-agent NAME ROLE AGE buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d ``` @@ -2046,7 +2046,7 @@ buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create ClusterRoleBindings +kubectl auth can-i create ClusterRoleBindings yes ``` @@ -2061,7 +2061,7 @@ yes Ensure that the service account exists: ```bash -$ kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent 
+kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent NAME SECRETS AGE buoyant-cloud-agent 1 301d ``` @@ -2069,7 +2069,7 @@ buoyant-cloud-agent 1 301d Also ensure you have permission to create ServiceAccounts: ```bash -$ kubectl -n buoyant-cloud auth can-i create ServiceAccount +kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2084,7 +2084,7 @@ yes Ensure that the secret exists: ```bash -$ kubectl -n buoyant-cloud get secret buoyant-cloud-id +kubectl -n buoyant-cloud get secret buoyant-cloud-id NAME TYPE DATA AGE buoyant-cloud-id Opaque 4 301d ``` @@ -2092,7 +2092,7 @@ buoyant-cloud-id Opaque 4 301d Also ensure you have permission to create ServiceAccounts: ```bash -$ kubectl -n buoyant-cloud auth can-i create ServiceAccount +kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2130,7 +2130,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-agent` Deployment with: ```bash -$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent +kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 156m ``` @@ -2153,7 +2153,7 @@ Ensure the `buoyant-cloud-agent` pod is injected, the `READY` column should show `2/2`: ```bash -$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent +kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 161m ``` @@ -2172,7 +2172,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -$ linkerd-buoyant version +linkerd-buoyant version CLI version: v0.4.4 Agent version: v0.4.4 ``` @@ -2231,7 +2231,7 @@ everything to start up. 
If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-metrics` DaemonSet with: ```bash -$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics +kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 163m buoyant-cloud-metrics-q8jhj 2/2 Running 0 163m @@ -2257,7 +2257,7 @@ Ensure the `buoyant-cloud-metrics` pods are injected, the `READY` column should show `2/2`: ```bash -$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics +kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 166m buoyant-cloud-metrics-q8jhj 2/2 Running 0 166m @@ -2279,7 +2279,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' +kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' {"app.kubernetes.io/name":"metrics","app.kubernetes.io/part-of":"buoyant-cloud","app.kubernetes.io/version":"v0.4.4"} ``` diff --git a/linkerd.io/content/2.14/tasks/upgrade.md b/linkerd.io/content/2.14/tasks/upgrade.md index 32f921e829..14321d64fe 100644 --- a/linkerd.io/content/2.14/tasks/upgrade.md +++ b/linkerd.io/content/2.14/tasks/upgrade.md @@ -317,7 +317,7 @@ Find the release name you used for the `linkerd2` chart, and the namespace where this release stored its config: ```bash -$ helm ls -A +helm ls -A NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION linkerd default 1 2021-11-22 17:14:50.751436374 -0500 -05 deployed linkerd2-2.11.1 stable-2.11.1 ``` @@ -350,18 +350,18 @@ the `linkerd-crds`, `linkerd-control-plane` and `linkerd-smi` charts: ```bash # First migrate the CRDs -$ helm -n default get manifest linkerd | \ +helm -n default get manifest linkerd | \ yq 'select(.kind == 
"CustomResourceDefinition") | .metadata.name' | \ grep -v '\-\-\-' | \ xargs -n1 sh -c \ 'kubectl annotate --overwrite crd/$0 meta.helm.sh/release-name=linkerd-crds meta.helm.sh/release-namespace=linkerd' # Special case for TrafficSplit (only use if you have TrafficSplit CRs) -$ kubectl annotate --overwrite crd/trafficsplits.split.smi-spec.io \ +kubectl annotate --overwrite crd/trafficsplits.split.smi-spec.io \ meta.helm.sh/release-name=linkerd-smi meta.helm.sh/release-namespace=linkerd-smi # Now migrate all the other resources -$ helm -n default get manifest linkerd | \ +helm -n default get manifest linkerd | \ yq 'select(.kind != "CustomResourceDefinition")' | \ yq '.kind, .metadata.name, .metadata.namespace' | \ grep -v '\-\-\-' | @@ -375,14 +375,14 @@ above. ```bash # First make sure you update the helm repo -$ helm repo up +helm repo up # Install the linkerd-crds chart -$ helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds +helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds # Install the linkerd-control-plane chart # (remember to add any customizations you retrieved above) -$ helm install linkerd-control-plane \ +helm install linkerd-control-plane \ -n linkerd \ --set-file identityTrustAnchorsPEM=ca.crt \ --set-file identity.issuer.tls.crtPEM=issuer.crt \ @@ -390,8 +390,8 @@ $ helm install linkerd-control-plane \ linkerd/linkerd-control-plane # Optional: if using TrafficSplit CRs -$ helm repo add l5d-smi https://linkerd.github.io/linkerd-smi -$ helm install linkerd-smi -n linkerd-smi --create-namespace l5d-smi/linkerd-smi +helm repo add l5d-smi https://linkerd.github.io/linkerd-smi +helm install linkerd-smi -n linkerd-smi --create-namespace l5d-smi/linkerd-smi ``` ##### Cleaning up the old linkerd2 Helm release @@ -402,7 +402,7 @@ remove the Helm release config for the old `linkerd2` chart (assuming you used the "Secret" storage backend, which is the default): ```bash -$ kubectl -n default delete secret \ 
+kubectl -n default delete secret \ --field-selector type=helm.sh/release.v1 \ -l name=linkerd,owner=helm ``` diff --git a/linkerd.io/content/2.15/reference/cli/check.md b/linkerd.io/content/2.15/reference/cli/check.md index 7cd61cd237..67a2486908 100644 --- a/linkerd.io/content/2.15/reference/cli/check.md +++ b/linkerd.io/content/2.15/reference/cli/check.md @@ -12,7 +12,7 @@ for a full list of all the possible checks, what they do and how to fix them. ## Example output ```bash -$ linkerd check +linkerd check kubernetes-api -------------- √ can initialize the client diff --git a/linkerd.io/content/2.15/reference/iptables.md b/linkerd.io/content/2.15/reference/iptables.md index 67a7ea89de..9b4d229a59 100644 --- a/linkerd.io/content/2.15/reference/iptables.md +++ b/linkerd.io/content/2.15/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -$ kubectl -n logs linkerd-init +kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2.15/tasks/configuring-dynamic-request-routing.md b/linkerd.io/content/2.15/tasks/configuring-dynamic-request-routing.md index c38f2a438c..e554bc6ac4 100644 --- a/linkerd.io/content/2.15/tasks/configuring-dynamic-request-routing.md +++ b/linkerd.io/content/2.15/tasks/configuring-dynamic-request-routing.md @@ -67,7 +67,7 @@ Requests to `/echo` on port 9898 to the frontend pod will get forwarded the pod pointed by the Service `backend-a-podinfo`: ```bash -$ curl -sX POST localhost:9898/echo \ +curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' PODINFO_UI_MESSAGE=A backend @@ -161,7 +161,7 @@ to the `backend-a-podinfo` Service. The previous requests should still reach `backend-a-podinfo` only: ```bash -$ curl -sX POST localhost:9898/echo \ +curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. 
backend' PODINFO_UI_MESSAGE=A backend @@ -171,7 +171,7 @@ But if we add the "`x-request-id: alternative`" header they get routed to `backend-b-podinfo`: ```bash -$ curl -sX POST \ +curl -sX POST \ -H 'x-request-id: alternative' \ localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' diff --git a/linkerd.io/content/2.15/tasks/configuring-per-route-policy.md b/linkerd.io/content/2.15/tasks/configuring-per-route-policy.md index a5c8b5c2ef..63b79fc6d4 100644 --- a/linkerd.io/content/2.15/tasks/configuring-per-route-policy.md +++ b/linkerd.io/content/2.15/tasks/configuring-per-route-policy.md @@ -30,7 +30,7 @@ haven't already done this. Inject and install the Books demo application: ```bash -$ kubectl create ns booksapp && \ +kubectl create ns booksapp && \ curl --proto '=https' --tlsv1.2 -sSfL https://run.linkerd.io/booksapp.yml \ | linkerd inject - \ | kubectl -n booksapp apply -f - @@ -44,21 +44,21 @@ run in the `booksapp` namespace. Confirm that the Linkerd data plane was injected successfully: ```bash -$ linkerd check -n booksapp --proxy -o short +linkerd check -n booksapp --proxy -o short ``` You can take a quick look at all the components that were added to your cluster by running: ```bash -$ kubectl -n booksapp get all +kubectl -n booksapp get all ``` Once the rollout has completed successfully, you can access the app itself by port-forwarding `webapp` locally: ```bash -$ kubectl -n booksapp port-forward svc/webapp 7000 & +kubectl -n booksapp port-forward svc/webapp 7000 & ``` Open [http://localhost:7000/](http://localhost:7000/) in your browser to see the @@ -87,7 +87,7 @@ First, let's run the `linkerd viz authz` command to list the authorization resources that currently exist for the `authors` deployment: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated 
default/all-unauthenticated 0.0rps 70.31% 8.1rps 1ms 43ms 49ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.3rps 1ms 1ms 1ms @@ -124,7 +124,7 @@ Now that we've defined a [`Server`] for the authors `Deployment`, we can run the currently unauthorized: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default authors-server 9.5rps 0.00% 0.0rps 0ms 0ms 0ms probe authors-server default/probe 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -312,7 +312,7 @@ network (0.0.0.0). Running `linkerd viz authz` again, we can now see that our new policies exist: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 2ms 2ms 2ms authors-probe-route authors-server authorizationpolicy/authors-probe-policy 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -383,7 +383,7 @@ requests, but we haven't _authorized_ requests to that route. 
Running the requests to `authors-modify-route`: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy - - - - - - authors-modify-route authors-server 9.7rps 0.00% 0.0rps 0ms 0ms 0ms @@ -442,7 +442,7 @@ Running the `linkerd viz authz` command one last time, we now see that all traffic is authorized: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 0ms 0ms 0ms authors-modify-route authors-server authorizationpolicy/authors-modify-policy 0.0rps 100.00% 0.0rps 0ms 0ms 0ms diff --git a/linkerd.io/content/2.15/tasks/getting-per-route-metrics.md b/linkerd.io/content/2.15/tasks/getting-per-route-metrics.md index 34ee2bff6a..c2db8c0965 100644 --- a/linkerd.io/content/2.15/tasks/getting-per-route-metrics.md +++ b/linkerd.io/content/2.15/tasks/getting-per-route-metrics.md @@ -24,7 +24,7 @@ per-route authorization. You can view per-route metrics in the CLI by running `linkerd viz routes`: ```bash -$ linkerd viz routes svc/webapp +linkerd viz routes svc/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 GET / webapp 100.00% 0.6rps 25ms 30ms 30ms GET /authors/{id} webapp 100.00% 0.6rps 22ms 29ms 30ms @@ -44,7 +44,7 @@ specified in your service profile will end up there. 
It is also possible to look the metrics up by other resource types, such as: ```bash -$ linkerd viz routes deploy/webapp +linkerd viz routes deploy/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 [DEFAULT] kubernetes 0.00% 0.0rps 0ms 0ms 0ms GET / webapp 100.00% 0.5rps 27ms 38ms 40ms @@ -63,7 +63,7 @@ Then, it is possible to filter all the way down to requests going from a specific resource to other services: ```bash -$ linkerd viz routes deploy/webapp --to svc/books +linkerd viz routes deploy/webapp --to svc/books ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 DELETE /books/{id}.json books 100.00% 0.5rps 18ms 29ms 30ms GET /books.json books 100.00% 1.1rps 7ms 12ms 18ms diff --git a/linkerd.io/content/2.15/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.15/tasks/multicluster-using-statefulsets.md index 9d8730b5b0..c720c09563 100644 --- a/linkerd.io/content/2.15/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.15/tasks/multicluster-using-statefulsets.md @@ -48,8 +48,8 @@ The first step is to clone the demo repository on your local machine. ```sh # clone example repository -$ git clone git@github.com:mateiidavid/l2d-k3d-statefulset.git -$ cd l2d-k3d-statefulset +git clone git@github.com:mateiidavid/l2d-k3d-statefulset.git +cd l2d-k3d-statefulset ``` The second step consists of creating two `k3d` clusters named `east` and `west`, @@ -60,10 +60,10 @@ everything. ```sh # create k3d clusters -$ ./create.sh +./create.sh # list the clusters -$ k3d cluster list +k3d cluster list NAME SERVERS AGENTS LOADBALANCER east 1/1 0/0 true west 1/1 0/0 true @@ -78,10 +78,10 @@ provided scripts, but feel free to have a look! ```sh # Install Linkerd and multicluster, output to check should be a success -$ ./install.sh +./install.sh # Next, link the two clusters together -$ ./link.sh +./link.sh ``` Perfect! If you've made it this far with no errors, then it's a good sign. In @@ -101,17 +101,17 @@ communication. 
First, we will deploy our pods and services: ```sh # deploy services and mesh namespaces -$ ./deploy.sh +./deploy.sh # verify both clusters # # verify east -$ kubectl --context=k3d-east get pods +kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 7s # verify west has headless service -$ kubectl --context=k3d-west get services +kubectl --context=k3d-west get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 10m nginx-svc ClusterIP None 80/TCP 8s @@ -119,7 +119,7 @@ nginx-svc ClusterIP None 80/TCP 8s # verify west has statefulset # # this may take a while to come up -$ kubectl --context=k3d-west get pods +kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 53s nginx-set-1 2/2 Running 0 43s @@ -130,7 +130,7 @@ Before we go further, let's have a look at the endpoints object for the `nginx-svc`: ```sh -$ kubectl --context=k3d-west get endpoints nginx-svc -o yaml +kubectl --context=k3d-west get endpoints nginx-svc -o yaml ... subsets: - addresses: @@ -170,23 +170,23 @@ would get an answer back. We can test this out by applying the curl pod to the `west` cluster: ```sh -$ kubectl --context=k3d-west apply -f east/curl.yml -$ kubectl --context=k3d-west get pods +kubectl --context=k3d-west apply -f east/curl.yml +kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 5m8s nginx-set-1 2/2 Running 0 4m58s nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s -$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh -/$ # prompt for curl pod +kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh +/# prompt for curl pod ``` If we now curl one of these instances, we will get back a response. 
```sh # exec'd on the pod -/ $ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local +/ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local " @@ -218,10 +218,10 @@ Now, let's do the same, but this time from the `east` cluster. We will first export the service. ```sh -$ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" +kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled -$ kubectl --context=k3d-east get services +kubectl --context=k3d-east get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 20h nginx-svc-west ClusterIP None 80/TCP 29s @@ -235,7 +235,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname will point to one of the services we see above: ```sh -$ kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml +kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml subsets: - addresses: - hostname: nginx-set-0 @@ -251,17 +251,17 @@ cluster (`west`), will be mirrored as a clusterIP service. We will see in a second why this matters. ```sh -$ kubectl --context=k3d-east get pods +kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -$ kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh +kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. # -/ $ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local +/ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local @@ -329,8 +329,8 @@ validation. 
To clean-up, you can remove both clusters entirely using the k3d CLI: ```sh -$ k3d cluster delete east +k3d cluster delete east cluster east deleted -$ k3d cluster delete west +k3d cluster delete west cluster west deleted ``` diff --git a/linkerd.io/content/2.15/tasks/restricting-access.md b/linkerd.io/content/2.15/tasks/restricting-access.md index 0b0b0c94b7..38ebdaeb3d 100644 --- a/linkerd.io/content/2.15/tasks/restricting-access.md +++ b/linkerd.io/content/2.15/tasks/restricting-access.md @@ -21,9 +21,9 @@ haven't already done this. Inject and install the Emojivoto application: ```bash -$ linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - +linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - ... -$ linkerd check -n emojivoto --proxy -o short +linkerd check -n emojivoto --proxy -o short ... ``` diff --git a/linkerd.io/content/2.15/tasks/securing-linkerd-tap.md b/linkerd.io/content/2.15/tasks/securing-linkerd-tap.md index 8a802c890c..639f81692f 100644 --- a/linkerd.io/content/2.15/tasks/securing-linkerd-tap.md +++ b/linkerd.io/content/2.15/tasks/securing-linkerd-tap.md @@ -60,7 +60,7 @@ kubectl auth can-i watch deployments.tap.linkerd.io -n emojivoto --as $(whoami) You can also use the Linkerd CLI's `--as` flag to confirm: ```bash -$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) Cannot connect to Linkerd Viz: namespaces is forbidden: User "XXXX" cannot list resource "namespaces" in API group "" at the cluster scope Validate the install with: linkerd viz check ... 
@@ -77,7 +77,7 @@ To enable tap access to all resources in all namespaces, you may bind your user to the `linkerd-linkerd-tap-admin` ClusterRole, installed by default: ```bash -$ kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin +kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin Name: linkerd-linkerd-viz-tap-admin Labels: component=tap linkerd.io/extension=viz @@ -109,7 +109,7 @@ kubectl create clusterrolebinding \ You can verify you now have tap access with: ```bash -$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) req id=3:0 proxy=in src=10.244.0.1:37392 dst=10.244.0.13:9996 tls=not_provided_by_remote :method=GET :authority=10.244.0.13:9996 :path=/ping ... ``` @@ -143,14 +143,14 @@ Because GCloud provides this additional level of access, there are cases where not. To validate this, check whether your GCloud user has Tap access: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces yes ``` And then validate whether your RBAC user has Tap access: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) no - no RBAC policy matched ``` @@ -187,14 +187,14 @@ privileges necessary to tap resources. 
To confirm: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web yes ``` This access is enabled via a `linkerd-linkerd-viz-web-admin` ClusterRoleBinding: ```bash -$ kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin +kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin Name: linkerd-linkerd-viz-web-admin Labels: component=web linkerd.io/extensions=viz @@ -227,6 +227,6 @@ kubectl delete clusterrolebindings/linkerd-linkerd-viz-web-admin To confirm: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web no ``` diff --git a/linkerd.io/content/2.15/tasks/troubleshooting.md b/linkerd.io/content/2.15/tasks/troubleshooting.md index bc58809cf8..e8fd89470b 100644 --- a/linkerd.io/content/2.15/tasks/troubleshooting.md +++ b/linkerd.io/content/2.15/tasks/troubleshooting.md @@ -230,7 +230,7 @@ Example failure: Ensure the Linkerd ClusterRoles exist: ```bash -$ kubectl get clusterroles | grep linkerd +kubectl get clusterroles | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -240,7 +240,7 @@ linkerd-policy 9d Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +kubectl auth can-i create clusterroles yes ``` @@ -257,7 +257,7 @@ Example failure: Ensure the Linkerd ClusterRoleBindings exist: ```bash -$ kubectl get clusterrolebindings | grep linkerd +kubectl get clusterrolebindings | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -267,7 +267,7 @@ linkerd-destination-policy 9d Also ensure you have permission to create ClusterRoleBindings: ```bash -$ 
kubectl auth can-i create clusterrolebindings +kubectl auth can-i create clusterrolebindings yes ``` @@ -284,7 +284,7 @@ Example failure: Ensure the Linkerd ServiceAccounts exist: ```bash -$ kubectl -n linkerd get serviceaccounts +kubectl -n linkerd get serviceaccounts NAME SECRETS AGE default 1 14m linkerd-destination 1 14m @@ -297,7 +297,7 @@ Also ensure you have permission to create ServiceAccounts in the Linkerd namespace: ```bash -$ kubectl -n linkerd auth can-i create serviceaccounts +kubectl -n linkerd auth can-i create serviceaccounts yes ``` @@ -314,7 +314,7 @@ Example failure: Ensure the Linkerd CRD exists: ```bash -$ kubectl get customresourcedefinitions +kubectl get customresourcedefinitions NAME CREATED AT serviceprofiles.linkerd.io 2019-04-25T21:47:31Z ``` @@ -322,7 +322,7 @@ serviceprofiles.linkerd.io 2019-04-25T21:47:31Z Also ensure you have permission to create CRDs: ```bash -$ kubectl auth can-i create customresourcedefinitions +kubectl auth can-i create customresourcedefinitions yes ``` @@ -339,14 +339,14 @@ Example failure: Ensure the Linkerd MutatingWebhookConfigurations exists: ```bash -$ kubectl get mutatingwebhookconfigurations | grep linkerd +kubectl get mutatingwebhookconfigurations | grep linkerd linkerd-proxy-injector-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create MutatingWebhookConfigurations: ```bash -$ kubectl auth can-i create mutatingwebhookconfigurations +kubectl auth can-i create mutatingwebhookconfigurations yes ``` @@ -363,14 +363,14 @@ Example failure: Ensure the Linkerd ValidatingWebhookConfiguration exists: ```bash -$ kubectl get validatingwebhookconfigurations | grep linkerd +kubectl get validatingwebhookconfigurations | grep linkerd linkerd-sp-validator-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create ValidatingWebhookConfigurations: ```bash -$ kubectl auth can-i create validatingwebhookconfigurations +kubectl auth can-i create 
validatingwebhookconfigurations yes ``` @@ -418,7 +418,7 @@ Example failure: Ensure the Linkerd ConfigMap exists: ```bash -$ kubectl -n linkerd get configmap/linkerd-config +kubectl -n linkerd get configmap/linkerd-config NAME DATA AGE linkerd-config 3 61m ``` @@ -426,7 +426,7 @@ linkerd-config 3 61m Also ensure you have permission to create ConfigMaps: ```bash -$ kubectl -n linkerd auth can-i create configmap +kubectl -n linkerd auth can-i create configmap yes ``` @@ -780,7 +780,7 @@ Example failure: Verify the state of the control plane pods with: ```bash -$ kubectl -n linkerd get po +kubectl -n linkerd get po NAME READY STATUS RESTARTS AGE linkerd-destination-5fd7b5d466-szgqm 2/2 Running 1 12m linkerd-identity-54df78c479-hbh5m 2/2 Running 0 12m @@ -862,7 +862,7 @@ Ensure you can connect to the Linkerd version check endpoint from the environment the `linkerd` cli is running: ```bash -$ curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" +curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" {"stable":"stable-2.1.0","edge":"edge-19.1.2"} ``` @@ -961,7 +961,7 @@ normally. Example failure: ```bash -$ linkerd check --proxy --namespace foo +linkerd check --proxy --namespace foo ... 
× data plane namespace exists The "foo" namespace does not exist @@ -1133,7 +1133,7 @@ Example error: Ensure that the linkerd-cni-config ConfigMap exists in the CNI namespace: ```bash -$ kubectl get cm linkerd-cni-config -n linkerd-cni +kubectl get cm linkerd-cni-config -n linkerd-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1141,7 +1141,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create ConfigMaps: ```bash -$ kubectl auth can-i create ConfigMaps +kubectl auth can-i create ConfigMaps yes ``` @@ -1158,7 +1158,7 @@ Example error: Ensure that the cluster role exists: ```bash -$ kubectl get clusterrole linkerd-cni +kubectl get clusterrole linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1166,7 +1166,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create ClusterRoles +kubectl auth can-i create ClusterRoles yes ``` @@ -1183,7 +1183,7 @@ Example error: Ensure that the cluster role binding exists: ```bash -$ kubectl get clusterrolebinding linkerd-cni +kubectl get clusterrolebinding linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1191,7 +1191,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create ClusterRoleBindings +kubectl auth can-i create ClusterRoleBindings yes ``` @@ -1208,7 +1208,7 @@ Example error: Ensure that the CNI service account exists in the CNI namespace: ```bash -$ kubectl get ServiceAccount linkerd-cni -n linkerd-cni +kubectl get ServiceAccount linkerd-cni -n linkerd-cni NAME SECRETS AGE linkerd-cni 1 45m ``` @@ -1216,7 +1216,7 @@ linkerd-cni 1 45m Also ensure you have permission to create ServiceAccount: ```bash -$ kubectl auth can-i create ServiceAccounts -n linkerd-cni +kubectl auth can-i create ServiceAccounts -n linkerd-cni yes ``` @@ -1233,7 +1233,7 @@ 
Example error: Ensure that the CNI daemonset exists in the CNI namespace: ```bash -$ kubectl get ds -n linkerd-cni +kubectl get ds -n linkerd-cni NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE linkerd-cni 1 1 1 1 1 beta.kubernetes.io/os=linux 14m ``` @@ -1241,7 +1241,7 @@ linkerd-cni 1 1 1 1 1 beta.kubernet Also ensure you have permission to create DaemonSets: ```bash -$ kubectl auth can-i create DaemonSets -n linkerd-cni +kubectl auth can-i create DaemonSets -n linkerd-cni yes ``` @@ -1258,7 +1258,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -$ kubectl get po -n linkerd-cn +kubectl get po -n linkerd-cn NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s @@ -1268,7 +1268,7 @@ linkerd-cni-p5670 1/1 Running 0 9m25s Ensure that all pods have finished the deployment of the CNI config and binary: ```bash -$ kubectl logs linkerd-cni-rzp2q -n linkerd-cni +kubectl logs linkerd-cni-rzp2q -n linkerd-cni Wrote linkerd CNI binaries to /host/opt/cni/bin Created CNI config /host/etc/cni/net.d/10-kindnet.conflist Done configuring CNI. Sleep=true @@ -1296,7 +1296,7 @@ Make sure multicluster extension is correctly installed and that the `links.multicluster.linkerd.io` CRD is present. ```bash -$ kubectl get crds | grep multicluster +kubectl get crds | grep multicluster NAME CREATED AT links.multicluster.linkerd.io 2021-03-10T09:58:10Z ``` @@ -1375,7 +1375,7 @@ the rules section. 
Expected rules for `linkerd-service-mirror-access-local-resources` cluster role: ```bash -$ kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml +kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml kind: ClusterRole metadata: labels: @@ -1408,7 +1408,7 @@ rules: Expected rules for `linkerd-service-mirror-read-remote-creds` role: ```bash -$ kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml +kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml kind: Role metadata: labels: @@ -1441,7 +1441,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the controller pod with: ```bash -$ kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror +kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror NAME READY STATUS RESTARTS AGE linkerd-service-mirror-7bb8ff5967-zg265 2/2 Running 0 50m ``` @@ -1559,7 +1559,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoles exist: ```bash -$ kubectl get clusterroles | grep linkerd-viz +kubectl get clusterroles | grep linkerd-viz linkerd-linkerd-viz-metrics-api 2021-01-26T18:02:17Z linkerd-linkerd-viz-prometheus 2021-01-26T18:02:17Z linkerd-linkerd-viz-tap 2021-01-26T18:02:17Z @@ -1570,7 +1570,7 @@ linkerd-linkerd-viz-web-check 2021-01-2 Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +kubectl auth can-i create clusterroles yes ``` @@ -1587,7 +1587,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoleBindings exist: ```bash -$ kubectl get clusterrolebindings | grep linkerd-viz +kubectl get clusterrolebindings | grep linkerd-viz linkerd-linkerd-viz-metrics-api ClusterRole/linkerd-linkerd-viz-metrics-api 18h linkerd-linkerd-viz-prometheus 
ClusterRole/linkerd-linkerd-viz-prometheus 18h linkerd-linkerd-viz-tap ClusterRole/linkerd-linkerd-viz-tap 18h @@ -1599,7 +1599,7 @@ linkerd-linkerd-viz-web-check ClusterRole/linkerd-linke Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create clusterrolebindings +kubectl auth can-i create clusterrolebindings yes ``` @@ -1688,7 +1688,7 @@ requirements in the cluster: Ensure all the linkerd-viz pods are injected ```bash -$ kubectl -n linkerd-viz get pods +kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1712,7 +1712,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-viz pods are running with 2/2 ```bash -$ kubectl -n linkerd-viz get pods +kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1895,7 +1895,7 @@ versions in sync by updating either the CLI or linkerd-jaeger as necessary. 
Ensure all the jaeger pods are injected ```bash -$ kubectl -n linkerd-jaeger get pods +kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE collector-69cc44dfbc-rhpfg 2/2 Running 0 11s jaeger-6f98d5c979-scqlq 2/2 Running 0 11s @@ -1916,7 +1916,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-jaeger pods are running with 2/2 ```bash -$ kubectl -n linkerd-jaeger get pods +kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE jaeger-injector-548684d74b-bcq5h 2/2 Running 0 5s collector-69cc44dfbc-wqf6s 2/2 Running 0 5s @@ -1965,7 +1965,7 @@ Ensure you can connect to the Linkerd Buoyant version check endpoint from the environment the `linkerd` cli is running: ```bash -$ curl https://buoyant.cloud/version.json +curl https://buoyant.cloud/version.json {"linkerd-buoyant":"v0.4.4"} ``` @@ -2030,7 +2030,7 @@ linkerd-buoyant install | kubectl apply -f - Ensure that the cluster role exists: ```bash -$ kubectl get clusterrole buoyant-cloud-agent +kubectl get clusterrole buoyant-cloud-agent NAME CREATED AT buoyant-cloud-agent 2020-11-13T00:59:50Z ``` @@ -2038,7 +2038,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create ClusterRoles +kubectl auth can-i create ClusterRoles yes ``` @@ -2053,7 +2053,7 @@ yes Ensure that the cluster role binding exists: ```bash -$ kubectl get clusterrolebinding buoyant-cloud-agent +kubectl get clusterrolebinding buoyant-cloud-agent NAME ROLE AGE buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d ``` @@ -2061,7 +2061,7 @@ buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create ClusterRoleBindings +kubectl auth can-i create ClusterRoleBindings yes ``` @@ -2076,7 +2076,7 @@ yes Ensure that the service account exists: ```bash -$ kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent 
+kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent NAME SECRETS AGE buoyant-cloud-agent 1 301d ``` @@ -2084,7 +2084,7 @@ buoyant-cloud-agent 1 301d Also ensure you have permission to create ServiceAccounts: ```bash -$ kubectl -n buoyant-cloud auth can-i create ServiceAccount +kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2099,7 +2099,7 @@ yes Ensure that the secret exists: ```bash -$ kubectl -n buoyant-cloud get secret buoyant-cloud-id +kubectl -n buoyant-cloud get secret buoyant-cloud-id NAME TYPE DATA AGE buoyant-cloud-id Opaque 4 301d ``` @@ -2107,7 +2107,7 @@ buoyant-cloud-id Opaque 4 301d Also ensure you have permission to create ServiceAccounts: ```bash -$ kubectl -n buoyant-cloud auth can-i create ServiceAccount +kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2145,7 +2145,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-agent` Deployment with: ```bash -$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent +kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 156m ``` @@ -2168,7 +2168,7 @@ Ensure the `buoyant-cloud-agent` pod is injected, the `READY` column should show `2/2`: ```bash -$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent +kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 161m ``` @@ -2187,7 +2187,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -$ linkerd-buoyant version +linkerd-buoyant version CLI version: v0.4.4 Agent version: v0.4.4 ``` @@ -2246,7 +2246,7 @@ everything to start up. 
If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-metrics` DaemonSet with: ```bash -$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics +kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 163m buoyant-cloud-metrics-q8jhj 2/2 Running 0 163m @@ -2272,7 +2272,7 @@ Ensure the `buoyant-cloud-metrics` pods are injected, the `READY` column should show `2/2`: ```bash -$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics +kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 166m buoyant-cloud-metrics-q8jhj 2/2 Running 0 166m @@ -2294,7 +2294,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' +kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' {"app.kubernetes.io/name":"metrics","app.kubernetes.io/part-of":"buoyant-cloud","app.kubernetes.io/version":"v0.4.4"} ``` diff --git a/linkerd.io/content/2.15/tasks/upgrade.md b/linkerd.io/content/2.15/tasks/upgrade.md index 23547217a4..a73f4d54fc 100644 --- a/linkerd.io/content/2.15/tasks/upgrade.md +++ b/linkerd.io/content/2.15/tasks/upgrade.md @@ -379,7 +379,7 @@ Find the release name you used for the `linkerd2` chart, and the namespace where this release stored its config: ```bash -$ helm ls -A +helm ls -A NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION linkerd default 1 2021-11-22 17:14:50.751436374 -0500 -05 deployed linkerd2-2.11.1 stable-2.11.1 ``` @@ -412,18 +412,18 @@ the `linkerd-crds`, `linkerd-control-plane` and `linkerd-smi` charts: ```bash # First migrate the CRDs -$ helm -n default get manifest linkerd | \ +helm -n default get manifest linkerd | \ yq 'select(.kind == 
"CustomResourceDefinition") | .metadata.name' | \ grep -v '\-\-\-' | \ xargs -n1 sh -c \ 'kubectl annotate --overwrite crd/$0 meta.helm.sh/release-name=linkerd-crds meta.helm.sh/release-namespace=linkerd' # Special case for TrafficSplit (only use if you have TrafficSplit CRs) -$ kubectl annotate --overwrite crd/trafficsplits.split.smi-spec.io \ +kubectl annotate --overwrite crd/trafficsplits.split.smi-spec.io \ meta.helm.sh/release-name=linkerd-smi meta.helm.sh/release-namespace=linkerd-smi # Now migrate all the other resources -$ helm -n default get manifest linkerd | \ +helm -n default get manifest linkerd | \ yq 'select(.kind != "CustomResourceDefinition")' | \ yq '.kind, .metadata.name, .metadata.namespace' | \ grep -v '\-\-\-' | @@ -437,14 +437,14 @@ above. ```bash # First make sure you update the helm repo -$ helm repo up +helm repo up # Install the linkerd-crds chart -$ helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds +helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds # Install the linkerd-control-plane chart # (remember to add any customizations you retrieved above) -$ helm install linkerd-control-plane \ +helm install linkerd-control-plane \ -n linkerd \ --set-file identityTrustAnchorsPEM=ca.crt \ --set-file identity.issuer.tls.crtPEM=issuer.crt \ @@ -452,8 +452,8 @@ $ helm install linkerd-control-plane \ linkerd/linkerd-control-plane # Optional: if using TrafficSplit CRs -$ helm repo add l5d-smi https://linkerd.github.io/linkerd-smi -$ helm install linkerd-smi -n linkerd-smi --create-namespace l5d-smi/linkerd-smi +helm repo add l5d-smi https://linkerd.github.io/linkerd-smi +helm install linkerd-smi -n linkerd-smi --create-namespace l5d-smi/linkerd-smi ``` ##### Cleaning up the old linkerd2 Helm release @@ -464,7 +464,7 @@ remove the Helm release config for the old `linkerd2` chart (assuming you used the "Secret" storage backend, which is the default): ```bash -$ kubectl -n default delete secret \ 
+kubectl -n default delete secret \ --field-selector type=helm.sh/release.v1 \ -l name=linkerd,owner=helm ``` diff --git a/linkerd.io/content/2.16/reference/cli/check.md b/linkerd.io/content/2.16/reference/cli/check.md index 7cd61cd237..67a2486908 100644 --- a/linkerd.io/content/2.16/reference/cli/check.md +++ b/linkerd.io/content/2.16/reference/cli/check.md @@ -12,7 +12,7 @@ for a full list of all the possible checks, what they do and how to fix them. ## Example output ```bash -$ linkerd check +linkerd check kubernetes-api -------------- √ can initialize the client diff --git a/linkerd.io/content/2.16/reference/iptables.md b/linkerd.io/content/2.16/reference/iptables.md index 67a7ea89de..9b4d229a59 100644 --- a/linkerd.io/content/2.16/reference/iptables.md +++ b/linkerd.io/content/2.16/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -$ kubectl -n logs linkerd-init +kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2.16/tasks/configuring-dynamic-request-routing.md b/linkerd.io/content/2.16/tasks/configuring-dynamic-request-routing.md index 004b50ded6..a44d12a1a5 100644 --- a/linkerd.io/content/2.16/tasks/configuring-dynamic-request-routing.md +++ b/linkerd.io/content/2.16/tasks/configuring-dynamic-request-routing.md @@ -67,7 +67,7 @@ Requests to `/echo` on port 9898 to the frontend pod will get forwarded the pod pointed by the Service `backend-a-podinfo`: ```bash -$ curl -sX POST localhost:9898/echo \ +curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' PODINFO_UI_MESSAGE=A backend @@ -132,7 +132,7 @@ the `backend-a-podinfo` Service. The previous requests should still reach `backend-a-podinfo` only: ```bash -$ curl -sX POST localhost:9898/echo \ +curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. 
backend' PODINFO_UI_MESSAGE=A backend @@ -142,7 +142,7 @@ But if we add the `x-request-id: alternative` header, they get routed to `backend-b-podinfo`: ```bash -$ curl -sX POST \ +curl -sX POST \ -H 'x-request-id: alternative' \ localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' diff --git a/linkerd.io/content/2.16/tasks/configuring-per-route-policy.md b/linkerd.io/content/2.16/tasks/configuring-per-route-policy.md index a5c8b5c2ef..63b79fc6d4 100644 --- a/linkerd.io/content/2.16/tasks/configuring-per-route-policy.md +++ b/linkerd.io/content/2.16/tasks/configuring-per-route-policy.md @@ -30,7 +30,7 @@ haven't already done this. Inject and install the Books demo application: ```bash -$ kubectl create ns booksapp && \ +kubectl create ns booksapp && \ curl --proto '=https' --tlsv1.2 -sSfL https://run.linkerd.io/booksapp.yml \ | linkerd inject - \ | kubectl -n booksapp apply -f - @@ -44,21 +44,21 @@ run in the `booksapp` namespace. Confirm that the Linkerd data plane was injected successfully: ```bash -$ linkerd check -n booksapp --proxy -o short +linkerd check -n booksapp --proxy -o short ``` You can take a quick look at all the components that were added to your cluster by running: ```bash -$ kubectl -n booksapp get all +kubectl -n booksapp get all ``` Once the rollout has completed successfully, you can access the app itself by port-forwarding `webapp` locally: ```bash -$ kubectl -n booksapp port-forward svc/webapp 7000 & +kubectl -n booksapp port-forward svc/webapp 7000 & ``` Open [http://localhost:7000/](http://localhost:7000/) in your browser to see the @@ -87,7 +87,7 @@ First, let's run the `linkerd viz authz` command to list the authorization resources that currently exist for the `authors` deployment: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated 
default/all-unauthenticated 0.0rps 70.31% 8.1rps 1ms 43ms 49ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.3rps 1ms 1ms 1ms @@ -124,7 +124,7 @@ Now that we've defined a [`Server`] for the authors `Deployment`, we can run the currently unauthorized: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default authors-server 9.5rps 0.00% 0.0rps 0ms 0ms 0ms probe authors-server default/probe 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -312,7 +312,7 @@ network (0.0.0.0). Running `linkerd viz authz` again, we can now see that our new policies exist: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 2ms 2ms 2ms authors-probe-route authors-server authorizationpolicy/authors-probe-policy 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -383,7 +383,7 @@ requests, but we haven't _authorized_ requests to that route. 
Running the requests to `authors-modify-route`: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy - - - - - - authors-modify-route authors-server 9.7rps 0.00% 0.0rps 0ms 0ms 0ms @@ -442,7 +442,7 @@ Running the `linkerd viz authz` command one last time, we now see that all traffic is authorized: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 0ms 0ms 0ms authors-modify-route authors-server authorizationpolicy/authors-modify-policy 0.0rps 100.00% 0.0rps 0ms 0ms 0ms diff --git a/linkerd.io/content/2.16/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.16/tasks/multicluster-using-statefulsets.md index 912241d181..c8d4400521 100644 --- a/linkerd.io/content/2.16/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.16/tasks/multicluster-using-statefulsets.md @@ -48,8 +48,8 @@ The first step is to clone the demo repository on your local machine. ```sh # clone example repository -$ git clone git@github.com:mateiidavid/l2d-k3d-statefulset.git -$ cd l2d-k3d-statefulset +git clone git@github.com:mateiidavid/l2d-k3d-statefulset.git +cd l2d-k3d-statefulset ``` The second step consists of creating two `k3d` clusters named `east` and `west`, @@ -60,10 +60,10 @@ everything. ```sh # create k3d clusters -$ ./create.sh +./create.sh # list the clusters -$ k3d cluster list +k3d cluster list NAME SERVERS AGENTS LOADBALANCER east 1/1 0/0 true west 1/1 0/0 true @@ -78,10 +78,10 @@ provided scripts, but feel free to have a look! 
```sh # Install Linkerd and multicluster, output to check should be a success -$ ./install.sh +./install.sh # Next, link the two clusters together -$ ./link.sh +./link.sh ``` Perfect! If you've made it this far with no errors, then it's a good sign. In @@ -101,17 +101,17 @@ communication. First, we will deploy our pods and services: ```sh # deploy services and mesh namespaces -$ ./deploy.sh +./deploy.sh # verify both clusters # # verify east -$ kubectl --context=k3d-east get pods +kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 7s # verify west has headless service -$ kubectl --context=k3d-west get services +kubectl --context=k3d-west get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 10m nginx-svc ClusterIP None 80/TCP 8s @@ -119,7 +119,7 @@ nginx-svc ClusterIP None 80/TCP 8s # verify west has statefulset # # this may take a while to come up -$ kubectl --context=k3d-west get pods +kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 53s nginx-set-1 2/2 Running 0 43s @@ -130,7 +130,7 @@ Before we go further, let's have a look at the endpoints object for the `nginx-svc`: ```sh -$ kubectl --context=k3d-west get endpoints nginx-svc -o yaml +kubectl --context=k3d-west get endpoints nginx-svc -o yaml ... subsets: - addresses: @@ -170,23 +170,23 @@ would get an answer back. 
We can test this out by applying the curl pod to the `west` cluster: ```sh -$ kubectl --context=k3d-west apply -f east/curl.yml -$ kubectl --context=k3d-west get pods +kubectl --context=k3d-west apply -f east/curl.yml +kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 5m8s nginx-set-1 2/2 Running 0 4m58s nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s -$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh -/$ # prompt for curl pod +kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh +# prompt for curl pod ``` If we now curl one of these instances, we will get back a response. ```sh # exec'd on the pod -/ $ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local +curl nginx-set-0.nginx-svc.default.svc.west.cluster.local " @@ -218,10 +218,10 @@ Now, let's do the same, but this time from the `east` cluster. We will first export the service. ```sh -$ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" +kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled -$ kubectl --context=k3d-east get services +kubectl --context=k3d-east get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 20h nginx-svc-west ClusterIP None 80/TCP 29s @@ -235,7 +235,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname will point to one of the services we see above: ```sh -$ kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml +kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml subsets: - addresses: - hostname: nginx-set-0 @@ -251,17 +251,17 @@ cluster (`west`), will be mirrored as a clusterIP service. We will see in a second why this matters.
```sh -$ kubectl --context=k3d-east get pods +kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -$ kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh +kubectl --context=k3d-east exec pod/curl-56dc7d945d-96r6p -it -c curl -- bin/sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. # -/ $ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local +curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local @@ -329,8 +329,8 @@ validation. To clean-up, you can remove both clusters entirely using the k3d CLI: ```sh -$ k3d cluster delete east +k3d cluster delete east cluster east deleted -$ k3d cluster delete west +k3d cluster delete west cluster west deleted ``` diff --git a/linkerd.io/content/2.16/tasks/restricting-access.md b/linkerd.io/content/2.16/tasks/restricting-access.md index 5654518600..c9850725f7 100644 --- a/linkerd.io/content/2.16/tasks/restricting-access.md +++ b/linkerd.io/content/2.16/tasks/restricting-access.md @@ -21,9 +21,9 @@ haven't already done this. Inject and install the Emojivoto application: ```bash -$ linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - +linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - ... -$ linkerd check -n emojivoto --proxy -o short +linkerd check -n emojivoto --proxy -o short ...
``` diff --git a/linkerd.io/content/2.16/tasks/securing-linkerd-tap.md b/linkerd.io/content/2.16/tasks/securing-linkerd-tap.md index 8a802c890c..639f81692f 100644 --- a/linkerd.io/content/2.16/tasks/securing-linkerd-tap.md +++ b/linkerd.io/content/2.16/tasks/securing-linkerd-tap.md @@ -60,7 +60,7 @@ kubectl auth can-i watch deployments.tap.linkerd.io -n emojivoto --as $(whoami) You can also use the Linkerd CLI's `--as` flag to confirm: ```bash -$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) Cannot connect to Linkerd Viz: namespaces is forbidden: User "XXXX" cannot list resource "namespaces" in API group "" at the cluster scope Validate the install with: linkerd viz check ... @@ -77,7 +77,7 @@ To enable tap access to all resources in all namespaces, you may bind your user to the `linkerd-linkerd-tap-admin` ClusterRole, installed by default: ```bash -$ kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin +kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin Name: linkerd-linkerd-viz-tap-admin Labels: component=tap linkerd.io/extension=viz @@ -109,7 +109,7 @@ kubectl create clusterrolebinding \ You can verify you now have tap access with: ```bash -$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) req id=3:0 proxy=in src=10.244.0.1:37392 dst=10.244.0.13:9996 tls=not_provided_by_remote :method=GET :authority=10.244.0.13:9996 :path=/ping ... ``` @@ -143,14 +143,14 @@ Because GCloud provides this additional level of access, there are cases where not. 
To validate this, check whether your GCloud user has Tap access: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces yes ``` And then validate whether your RBAC user has Tap access: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) no - no RBAC policy matched ``` @@ -187,14 +187,14 @@ privileges necessary to tap resources. To confirm: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web yes ``` This access is enabled via a `linkerd-linkerd-viz-web-admin` ClusterRoleBinding: ```bash -$ kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin +kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin Name: linkerd-linkerd-viz-web-admin Labels: component=web linkerd.io/extensions=viz @@ -227,6 +227,6 @@ kubectl delete clusterrolebindings/linkerd-linkerd-viz-web-admin To confirm: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web no ``` diff --git a/linkerd.io/content/2.16/tasks/troubleshooting.md b/linkerd.io/content/2.16/tasks/troubleshooting.md index bc58809cf8..e8fd89470b 100644 --- a/linkerd.io/content/2.16/tasks/troubleshooting.md +++ b/linkerd.io/content/2.16/tasks/troubleshooting.md @@ -230,7 +230,7 @@ Example failure: Ensure the Linkerd ClusterRoles exist: ```bash -$ kubectl get clusterroles | grep linkerd +kubectl get clusterroles | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -240,7 +240,7 @@ 
linkerd-policy 9d Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +kubectl auth can-i create clusterroles yes ``` @@ -257,7 +257,7 @@ Example failure: Ensure the Linkerd ClusterRoleBindings exist: ```bash -$ kubectl get clusterrolebindings | grep linkerd +kubectl get clusterrolebindings | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -267,7 +267,7 @@ linkerd-destination-policy 9d Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create clusterrolebindings +kubectl auth can-i create clusterrolebindings yes ``` @@ -284,7 +284,7 @@ Example failure: Ensure the Linkerd ServiceAccounts exist: ```bash -$ kubectl -n linkerd get serviceaccounts +kubectl -n linkerd get serviceaccounts NAME SECRETS AGE default 1 14m linkerd-destination 1 14m @@ -297,7 +297,7 @@ Also ensure you have permission to create ServiceAccounts in the Linkerd namespace: ```bash -$ kubectl -n linkerd auth can-i create serviceaccounts +kubectl -n linkerd auth can-i create serviceaccounts yes ``` @@ -314,7 +314,7 @@ Example failure: Ensure the Linkerd CRD exists: ```bash -$ kubectl get customresourcedefinitions +kubectl get customresourcedefinitions NAME CREATED AT serviceprofiles.linkerd.io 2019-04-25T21:47:31Z ``` @@ -322,7 +322,7 @@ serviceprofiles.linkerd.io 2019-04-25T21:47:31Z Also ensure you have permission to create CRDs: ```bash -$ kubectl auth can-i create customresourcedefinitions +kubectl auth can-i create customresourcedefinitions yes ``` @@ -339,14 +339,14 @@ Example failure: Ensure the Linkerd MutatingWebhookConfigurations exists: ```bash -$ kubectl get mutatingwebhookconfigurations | grep linkerd +kubectl get mutatingwebhookconfigurations | grep linkerd linkerd-proxy-injector-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create MutatingWebhookConfigurations: ```bash -$ kubectl auth can-i create 
mutatingwebhookconfigurations +kubectl auth can-i create mutatingwebhookconfigurations yes ``` @@ -363,14 +363,14 @@ Example failure: Ensure the Linkerd ValidatingWebhookConfiguration exists: ```bash -$ kubectl get validatingwebhookconfigurations | grep linkerd +kubectl get validatingwebhookconfigurations | grep linkerd linkerd-sp-validator-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create ValidatingWebhookConfigurations: ```bash -$ kubectl auth can-i create validatingwebhookconfigurations +kubectl auth can-i create validatingwebhookconfigurations yes ``` @@ -418,7 +418,7 @@ Example failure: Ensure the Linkerd ConfigMap exists: ```bash -$ kubectl -n linkerd get configmap/linkerd-config +kubectl -n linkerd get configmap/linkerd-config NAME DATA AGE linkerd-config 3 61m ``` @@ -426,7 +426,7 @@ linkerd-config 3 61m Also ensure you have permission to create ConfigMaps: ```bash -$ kubectl -n linkerd auth can-i create configmap +kubectl -n linkerd auth can-i create configmap yes ``` @@ -780,7 +780,7 @@ Example failure: Verify the state of the control plane pods with: ```bash -$ kubectl -n linkerd get po +kubectl -n linkerd get po NAME READY STATUS RESTARTS AGE linkerd-destination-5fd7b5d466-szgqm 2/2 Running 1 12m linkerd-identity-54df78c479-hbh5m 2/2 Running 0 12m @@ -862,7 +862,7 @@ Ensure you can connect to the Linkerd version check endpoint from the environment the `linkerd` cli is running: ```bash -$ curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" +curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" {"stable":"stable-2.1.0","edge":"edge-19.1.2"} ``` @@ -961,7 +961,7 @@ normally. Example failure: ```bash -$ linkerd check --proxy --namespace foo +linkerd check --proxy --namespace foo ... 
× data plane namespace exists The "foo" namespace does not exist @@ -1133,7 +1133,7 @@ Example error: Ensure that the linkerd-cni-config ConfigMap exists in the CNI namespace: ```bash -$ kubectl get cm linkerd-cni-config -n linkerd-cni +kubectl get cm linkerd-cni-config -n linkerd-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1141,7 +1141,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create ConfigMaps: ```bash -$ kubectl auth can-i create ConfigMaps +kubectl auth can-i create ConfigMaps yes ``` @@ -1158,7 +1158,7 @@ Example error: Ensure that the cluster role exists: ```bash -$ kubectl get clusterrole linkerd-cni +kubectl get clusterrole linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1166,7 +1166,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create ClusterRoles +kubectl auth can-i create ClusterRoles yes ``` @@ -1183,7 +1183,7 @@ Example error: Ensure that the cluster role binding exists: ```bash -$ kubectl get clusterrolebinding linkerd-cni +kubectl get clusterrolebinding linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1191,7 +1191,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create ClusterRoleBindings +kubectl auth can-i create ClusterRoleBindings yes ``` @@ -1208,7 +1208,7 @@ Example error: Ensure that the CNI service account exists in the CNI namespace: ```bash -$ kubectl get ServiceAccount linkerd-cni -n linkerd-cni +kubectl get ServiceAccount linkerd-cni -n linkerd-cni NAME SECRETS AGE linkerd-cni 1 45m ``` @@ -1216,7 +1216,7 @@ linkerd-cni 1 45m Also ensure you have permission to create ServiceAccount: ```bash -$ kubectl auth can-i create ServiceAccounts -n linkerd-cni +kubectl auth can-i create ServiceAccounts -n linkerd-cni yes ``` @@ -1233,7 +1233,7 @@ 
Example error: Ensure that the CNI daemonset exists in the CNI namespace: ```bash -$ kubectl get ds -n linkerd-cni +kubectl get ds -n linkerd-cni NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE linkerd-cni 1 1 1 1 1 beta.kubernetes.io/os=linux 14m ``` @@ -1241,7 +1241,7 @@ linkerd-cni 1 1 1 1 1 beta.kubernet Also ensure you have permission to create DaemonSets: ```bash -$ kubectl auth can-i create DaemonSets -n linkerd-cni +kubectl auth can-i create DaemonSets -n linkerd-cni yes ``` @@ -1258,7 +1258,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -$ kubectl get po -n linkerd-cn +kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s @@ -1268,7 +1268,7 @@ linkerd-cni-p5670 1/1 Running 0 9m25s Ensure that all pods have finished the deployment of the CNI config and binary: ```bash -$ kubectl logs linkerd-cni-rzp2q -n linkerd-cni +kubectl logs linkerd-cni-rzp2q -n linkerd-cni Wrote linkerd CNI binaries to /host/opt/cni/bin Created CNI config /host/etc/cni/net.d/10-kindnet.conflist Done configuring CNI. Sleep=true @@ -1296,7 +1296,7 @@ Make sure multicluster extension is correctly installed and that the `links.multicluster.linkerd.io` CRD is present. ```bash -$ kubectl get crds | grep multicluster +kubectl get crds | grep multicluster NAME CREATED AT links.multicluster.linkerd.io 2021-03-10T09:58:10Z ``` @@ -1375,7 +1375,7 @@ the rules section.
Expected rules for `linkerd-service-mirror-access-local-resources` cluster role: ```bash -$ kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml +kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml kind: ClusterRole metadata: labels: @@ -1408,7 +1408,7 @@ rules: Expected rules for `linkerd-service-mirror-read-remote-creds` role: ```bash -$ kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml +kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml kind: Role metadata: labels: @@ -1441,7 +1441,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the controller pod with: ```bash -$ kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror +kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror NAME READY STATUS RESTARTS AGE linkerd-service-mirror-7bb8ff5967-zg265 2/2 Running 0 50m ``` @@ -1559,7 +1559,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoles exist: ```bash -$ kubectl get clusterroles | grep linkerd-viz +kubectl get clusterroles | grep linkerd-viz linkerd-linkerd-viz-metrics-api 2021-01-26T18:02:17Z linkerd-linkerd-viz-prometheus 2021-01-26T18:02:17Z linkerd-linkerd-viz-tap 2021-01-26T18:02:17Z @@ -1570,7 +1570,7 @@ linkerd-linkerd-viz-web-check 2021-01-2 Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +kubectl auth can-i create clusterroles yes ``` @@ -1587,7 +1587,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoleBindings exist: ```bash -$ kubectl get clusterrolebindings | grep linkerd-viz +kubectl get clusterrolebindings | grep linkerd-viz linkerd-linkerd-viz-metrics-api ClusterRole/linkerd-linkerd-viz-metrics-api 18h linkerd-linkerd-viz-prometheus 
ClusterRole/linkerd-linkerd-viz-prometheus 18h linkerd-linkerd-viz-tap ClusterRole/linkerd-linkerd-viz-tap 18h @@ -1599,7 +1599,7 @@ linkerd-linkerd-viz-web-check ClusterRole/linkerd-linke Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create clusterrolebindings +kubectl auth can-i create clusterrolebindings yes ``` @@ -1688,7 +1688,7 @@ requirements in the cluster: Ensure all the linkerd-viz pods are injected ```bash -$ kubectl -n linkerd-viz get pods +kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1712,7 +1712,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-viz pods are running with 2/2 ```bash -$ kubectl -n linkerd-viz get pods +kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1895,7 +1895,7 @@ versions in sync by updating either the CLI or linkerd-jaeger as necessary. 
Ensure all the jaeger pods are injected ```bash -$ kubectl -n linkerd-jaeger get pods +kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE collector-69cc44dfbc-rhpfg 2/2 Running 0 11s jaeger-6f98d5c979-scqlq 2/2 Running 0 11s @@ -1916,7 +1916,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-jaeger pods are running with 2/2 ```bash -$ kubectl -n linkerd-jaeger get pods +kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE jaeger-injector-548684d74b-bcq5h 2/2 Running 0 5s collector-69cc44dfbc-wqf6s 2/2 Running 0 5s @@ -1965,7 +1965,7 @@ Ensure you can connect to the Linkerd Buoyant version check endpoint from the environment the `linkerd` cli is running: ```bash -$ curl https://buoyant.cloud/version.json +curl https://buoyant.cloud/version.json {"linkerd-buoyant":"v0.4.4"} ``` @@ -2030,7 +2030,7 @@ linkerd-buoyant install | kubectl apply -f - Ensure that the cluster role exists: ```bash -$ kubectl get clusterrole buoyant-cloud-agent +kubectl get clusterrole buoyant-cloud-agent NAME CREATED AT buoyant-cloud-agent 2020-11-13T00:59:50Z ``` @@ -2038,7 +2038,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create ClusterRoles +kubectl auth can-i create ClusterRoles yes ``` @@ -2053,7 +2053,7 @@ yes Ensure that the cluster role binding exists: ```bash -$ kubectl get clusterrolebinding buoyant-cloud-agent +kubectl get clusterrolebinding buoyant-cloud-agent NAME ROLE AGE buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d ``` @@ -2061,7 +2061,7 @@ buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create ClusterRoleBindings +kubectl auth can-i create ClusterRoleBindings yes ``` @@ -2076,7 +2076,7 @@ yes Ensure that the service account exists: ```bash -$ kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent 
+kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent NAME SECRETS AGE buoyant-cloud-agent 1 301d ``` @@ -2084,7 +2084,7 @@ buoyant-cloud-agent 1 301d Also ensure you have permission to create ServiceAccounts: ```bash -$ kubectl -n buoyant-cloud auth can-i create ServiceAccount +kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2099,7 +2099,7 @@ yes Ensure that the secret exists: ```bash -$ kubectl -n buoyant-cloud get secret buoyant-cloud-id +kubectl -n buoyant-cloud get secret buoyant-cloud-id NAME TYPE DATA AGE buoyant-cloud-id Opaque 4 301d ``` @@ -2107,7 +2107,7 @@ buoyant-cloud-id Opaque 4 301d Also ensure you have permission to create ServiceAccounts: ```bash -$ kubectl -n buoyant-cloud auth can-i create ServiceAccount +kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2145,7 +2145,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-agent` Deployment with: ```bash -$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent +kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 156m ``` @@ -2168,7 +2168,7 @@ Ensure the `buoyant-cloud-agent` pod is injected, the `READY` column should show `2/2`: ```bash -$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent +kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 161m ``` @@ -2187,7 +2187,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -$ linkerd-buoyant version +linkerd-buoyant version CLI version: v0.4.4 Agent version: v0.4.4 ``` @@ -2246,7 +2246,7 @@ everything to start up. 
If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-metrics` DaemonSet with: ```bash -$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics +kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 163m buoyant-cloud-metrics-q8jhj 2/2 Running 0 163m @@ -2272,7 +2272,7 @@ Ensure the `buoyant-cloud-metrics` pods are injected, the `READY` column should show `2/2`: ```bash -$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics +kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 166m buoyant-cloud-metrics-q8jhj 2/2 Running 0 166m @@ -2294,7 +2294,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' +kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' {"app.kubernetes.io/name":"metrics","app.kubernetes.io/part-of":"buoyant-cloud","app.kubernetes.io/version":"v0.4.4"} ``` diff --git a/linkerd.io/content/2.16/tasks/upgrade.md b/linkerd.io/content/2.16/tasks/upgrade.md index 23547217a4..a73f4d54fc 100644 --- a/linkerd.io/content/2.16/tasks/upgrade.md +++ b/linkerd.io/content/2.16/tasks/upgrade.md @@ -379,7 +379,7 @@ Find the release name you used for the `linkerd2` chart, and the namespace where this release stored its config: ```bash -$ helm ls -A +helm ls -A NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION linkerd default 1 2021-11-22 17:14:50.751436374 -0500 -05 deployed linkerd2-2.11.1 stable-2.11.1 ``` @@ -412,18 +412,18 @@ the `linkerd-crds`, `linkerd-control-plane` and `linkerd-smi` charts: ```bash # First migrate the CRDs -$ helm -n default get manifest linkerd | \ +helm -n default get manifest linkerd | \ yq 'select(.kind == 
"CustomResourceDefinition") | .metadata.name' | \ grep -v '\-\-\-' | \ xargs -n1 sh -c \ 'kubectl annotate --overwrite crd/$0 meta.helm.sh/release-name=linkerd-crds meta.helm.sh/release-namespace=linkerd' # Special case for TrafficSplit (only use if you have TrafficSplit CRs) -$ kubectl annotate --overwrite crd/trafficsplits.split.smi-spec.io \ +kubectl annotate --overwrite crd/trafficsplits.split.smi-spec.io \ meta.helm.sh/release-name=linkerd-smi meta.helm.sh/release-namespace=linkerd-smi # Now migrate all the other resources -$ helm -n default get manifest linkerd | \ +helm -n default get manifest linkerd | \ yq 'select(.kind != "CustomResourceDefinition")' | \ yq '.kind, .metadata.name, .metadata.namespace' | \ grep -v '\-\-\-' | @@ -437,14 +437,14 @@ above. ```bash # First make sure you update the helm repo -$ helm repo up +helm repo up # Install the linkerd-crds chart -$ helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds +helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds # Install the linkerd-control-plane chart # (remember to add any customizations you retrieved above) -$ helm install linkerd-control-plane \ +helm install linkerd-control-plane \ -n linkerd \ --set-file identityTrustAnchorsPEM=ca.crt \ --set-file identity.issuer.tls.crtPEM=issuer.crt \ @@ -452,8 +452,8 @@ $ helm install linkerd-control-plane \ linkerd/linkerd-control-plane # Optional: if using TrafficSplit CRs -$ helm repo add l5d-smi https://linkerd.github.io/linkerd-smi -$ helm install linkerd-smi -n linkerd-smi --create-namespace l5d-smi/linkerd-smi +helm repo add l5d-smi https://linkerd.github.io/linkerd-smi +helm install linkerd-smi -n linkerd-smi --create-namespace l5d-smi/linkerd-smi ``` ##### Cleaning up the old linkerd2 Helm release @@ -464,7 +464,7 @@ remove the Helm release config for the old `linkerd2` chart (assuming you used the "Secret" storage backend, which is the default): ```bash -$ kubectl -n default delete secret \ 
+kubectl -n default delete secret \ --field-selector type=helm.sh/release.v1 \ -l name=linkerd,owner=helm ``` diff --git a/linkerd.io/content/2.17/reference/cli/check.md b/linkerd.io/content/2.17/reference/cli/check.md index 7cd61cd237..67a2486908 100644 --- a/linkerd.io/content/2.17/reference/cli/check.md +++ b/linkerd.io/content/2.17/reference/cli/check.md @@ -12,7 +12,7 @@ for a full list of all the possible checks, what they do and how to fix them. ## Example output ```bash -$ linkerd check +linkerd check kubernetes-api -------------- √ can initialize the client diff --git a/linkerd.io/content/2.17/reference/iptables.md b/linkerd.io/content/2.17/reference/iptables.md index 67a7ea89de..9b4d229a59 100644 --- a/linkerd.io/content/2.17/reference/iptables.md +++ b/linkerd.io/content/2.17/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -$ kubectl -n logs linkerd-init +kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2.17/tasks/configuring-dynamic-request-routing.md b/linkerd.io/content/2.17/tasks/configuring-dynamic-request-routing.md index 004b50ded6..a44d12a1a5 100644 --- a/linkerd.io/content/2.17/tasks/configuring-dynamic-request-routing.md +++ b/linkerd.io/content/2.17/tasks/configuring-dynamic-request-routing.md @@ -67,7 +67,7 @@ Requests to `/echo` on port 9898 to the frontend pod will get forwarded the pod pointed by the Service `backend-a-podinfo`: ```bash -$ curl -sX POST localhost:9898/echo \ +curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' PODINFO_UI_MESSAGE=A backend @@ -132,7 +132,7 @@ the `backend-a-podinfo` Service. The previous requests should still reach `backend-a-podinfo` only: ```bash -$ curl -sX POST localhost:9898/echo \ +curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. 
backend' PODINFO_UI_MESSAGE=A backend @@ -142,7 +142,7 @@ But if we add the `x-request-id: alternative` header, they get routed to `backend-b-podinfo`: ```bash -$ curl -sX POST \ +curl -sX POST \ -H 'x-request-id: alternative' \ localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' diff --git a/linkerd.io/content/2.17/tasks/configuring-per-route-policy.md b/linkerd.io/content/2.17/tasks/configuring-per-route-policy.md index a5c8b5c2ef..63b79fc6d4 100644 --- a/linkerd.io/content/2.17/tasks/configuring-per-route-policy.md +++ b/linkerd.io/content/2.17/tasks/configuring-per-route-policy.md @@ -30,7 +30,7 @@ haven't already done this. Inject and install the Books demo application: ```bash -$ kubectl create ns booksapp && \ +kubectl create ns booksapp && \ curl --proto '=https' --tlsv1.2 -sSfL https://run.linkerd.io/booksapp.yml \ | linkerd inject - \ | kubectl -n booksapp apply -f - @@ -44,21 +44,21 @@ run in the `booksapp` namespace. Confirm that the Linkerd data plane was injected successfully: ```bash -$ linkerd check -n booksapp --proxy -o short +linkerd check -n booksapp --proxy -o short ``` You can take a quick look at all the components that were added to your cluster by running: ```bash -$ kubectl -n booksapp get all +kubectl -n booksapp get all ``` Once the rollout has completed successfully, you can access the app itself by port-forwarding `webapp` locally: ```bash -$ kubectl -n booksapp port-forward svc/webapp 7000 & +kubectl -n booksapp port-forward svc/webapp 7000 & ``` Open [http://localhost:7000/](http://localhost:7000/) in your browser to see the @@ -87,7 +87,7 @@ First, let's run the `linkerd viz authz` command to list the authorization resources that currently exist for the `authors` deployment: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated 
default/all-unauthenticated 0.0rps 70.31% 8.1rps 1ms 43ms 49ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.3rps 1ms 1ms 1ms @@ -124,7 +124,7 @@ Now that we've defined a [`Server`] for the authors `Deployment`, we can run the currently unauthorized: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default authors-server 9.5rps 0.00% 0.0rps 0ms 0ms 0ms probe authors-server default/probe 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -312,7 +312,7 @@ network (0.0.0.0). Running `linkerd viz authz` again, we can now see that our new policies exist: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 2ms 2ms 2ms authors-probe-route authors-server authorizationpolicy/authors-probe-policy 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -383,7 +383,7 @@ requests, but we haven't _authorized_ requests to that route. 
Running the requests to `authors-modify-route`: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy - - - - - - authors-modify-route authors-server 9.7rps 0.00% 0.0rps 0ms 0ms 0ms @@ -442,7 +442,7 @@ Running the `linkerd viz authz` command one last time, we now see that all traffic is authorized: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 0ms 0ms 0ms authors-modify-route authors-server authorizationpolicy/authors-modify-policy 0.0rps 100.00% 0.0rps 0ms 0ms 0ms diff --git a/linkerd.io/content/2.17/tasks/managing-egress-traffic.md b/linkerd.io/content/2.17/tasks/managing-egress-traffic.md index d77f290917..cbcd48c416 100644 --- a/linkerd.io/content/2.17/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2.17/tasks/managing-egress-traffic.md @@ -69,7 +69,7 @@ Now SSH into the client container and start generating some external traffic: ```bash kubectl -n egress-test exec -it client-xxx -c client -- sh -$ while sleep 1; do curl -s http://httpbin.org/get ; done +while sleep 1; do curl -s http://httpbin.org/get ; done ``` In a separate shell, you can use the Linkerd diagnostics command to visualize @@ -190,7 +190,7 @@ Interestingly enough though, if we go back to our client shell and we try to initiate HTTPS traffic to the same service, it will not be allowed: ```bash -~ $ curl -v https://httpbin.org/get +~ curl -v https://httpbin.org/get curl: (35) TLS connect error: error:00000000:lib(0)::reason(0) ``` @@ -413,7 +413,7 @@ Now let's verify all works as expected: ```bash # plaintext traffic goes as expected to the /get 
path -$ curl http://httpbin.org/get +curl http://httpbin.org/get { "args": {}, "headers": { @@ -427,14 +427,14 @@ $ curl http://httpbin.org/get } # encrypted traffic can target all paths and hosts -$ curl https://httpbin.org/ip +curl https://httpbin.org/ip { "origin": "51.116.126.217" } # arbitrary unencrypted traffic goes to the internal service -$ curl http://google.com +curl http://google.com { "requestUID": "in:http-sid:terminus-grpc:-1-h1:80-190120723", "payload": "You cannot go there right now"} diff --git a/linkerd.io/content/2.17/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.17/tasks/multicluster-using-statefulsets.md index 9d8730b5b0..c720c09563 100644 --- a/linkerd.io/content/2.17/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.17/tasks/multicluster-using-statefulsets.md @@ -48,8 +48,8 @@ The first step is to clone the demo repository on your local machine. ```sh # clone example repository -$ git clone git@github.com:mateiidavid/l2d-k3d-statefulset.git -$ cd l2d-k3d-statefulset +git clone git@github.com:mateiidavid/l2d-k3d-statefulset.git +cd l2d-k3d-statefulset ``` The second step consists of creating two `k3d` clusters named `east` and `west`, @@ -60,10 +60,10 @@ everything. ```sh # create k3d clusters -$ ./create.sh +./create.sh # list the clusters -$ k3d cluster list +k3d cluster list NAME SERVERS AGENTS LOADBALANCER east 1/1 0/0 true west 1/1 0/0 true @@ -78,10 +78,10 @@ provided scripts, but feel free to have a look! ```sh # Install Linkerd and multicluster, output to check should be a success -$ ./install.sh +./install.sh # Next, link the two clusters together -$ ./link.sh +./link.sh ``` Perfect! If you've made it this far with no errors, then it's a good sign. In @@ -101,17 +101,17 @@ communication. 
First, we will deploy our pods and services: ```sh # deploy services and mesh namespaces -$ ./deploy.sh +./deploy.sh # verify both clusters # # verify east -$ kubectl --context=k3d-east get pods +kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 7s # verify west has headless service -$ kubectl --context=k3d-west get services +kubectl --context=k3d-west get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 10m nginx-svc ClusterIP None 80/TCP 8s @@ -119,7 +119,7 @@ nginx-svc ClusterIP None 80/TCP 8s # verify west has statefulset # # this may take a while to come up -$ kubectl --context=k3d-west get pods +kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 53s nginx-set-1 2/2 Running 0 43s @@ -130,7 +130,7 @@ Before we go further, let's have a look at the endpoints object for the `nginx-svc`: ```sh -$ kubectl --context=k3d-west get endpoints nginx-svc -o yaml +kubectl --context=k3d-west get endpoints nginx-svc -o yaml ... subsets: - addresses: @@ -170,23 +170,23 @@ would get an answer back. We can test this out by applying the curl pod to the `west` cluster: ```sh -$ kubectl --context=k3d-west apply -f east/curl.yml -$ kubectl --context=k3d-west get pods +kubectl --context=k3d-west apply -f east/curl.yml +kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 5m8s nginx-set-1 2/2 Running 0 4m58s nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s -$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh -/$ # prompt for curl pod +kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh +# prompt for curl pod ``` If we now curl one of these instances, we will get back a response.
```sh # exec'd on the pod -/ $ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local +curl nginx-set-0.nginx-svc.default.svc.west.cluster.local " @@ -218,10 +218,10 @@ Now, let's do the same, but this time from the `east` cluster. We will first export the service. ```sh -$ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" +kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled -$ kubectl --context=k3d-east get services +kubectl --context=k3d-east get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 20h nginx-svc-west ClusterIP None 80/TCP 29s @@ -235,7 +235,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname will point to one of the services we see above: ```sh -$ kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml +kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml subsets: - addresses: - hostname: nginx-set-0 @@ -251,17 +251,17 @@ cluster (`west`), will be mirrored as a clusterIP service. We will see in a second why this matters. ```sh -$ kubectl --context=k3d-east get pods +kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -$ kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh +kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. # -/ $ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local +curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local @@ -329,8 +329,8 @@ validation.
To clean-up, you can remove both clusters entirely using the k3d CLI: ```sh -$ k3d cluster delete east +k3d cluster delete east cluster east deleted -$ k3d cluster delete west +k3d cluster delete west cluster west deleted ``` diff --git a/linkerd.io/content/2.17/tasks/restricting-access.md b/linkerd.io/content/2.17/tasks/restricting-access.md index 5654518600..c9850725f7 100644 --- a/linkerd.io/content/2.17/tasks/restricting-access.md +++ b/linkerd.io/content/2.17/tasks/restricting-access.md @@ -21,9 +21,9 @@ haven't already done this. Inject and install the Emojivoto application: ```bash -$ linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - +linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - ... -$ linkerd check -n emojivoto --proxy -o short +linkerd check -n emojivoto --proxy -o short ... ``` diff --git a/linkerd.io/content/2.17/tasks/securing-linkerd-tap.md b/linkerd.io/content/2.17/tasks/securing-linkerd-tap.md index 8a802c890c..639f81692f 100644 --- a/linkerd.io/content/2.17/tasks/securing-linkerd-tap.md +++ b/linkerd.io/content/2.17/tasks/securing-linkerd-tap.md @@ -60,7 +60,7 @@ kubectl auth can-i watch deployments.tap.linkerd.io -n emojivoto --as $(whoami) You can also use the Linkerd CLI's `--as` flag to confirm: ```bash -$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) Cannot connect to Linkerd Viz: namespaces is forbidden: User "XXXX" cannot list resource "namespaces" in API group "" at the cluster scope Validate the install with: linkerd viz check ... 
@@ -77,7 +77,7 @@ To enable tap access to all resources in all namespaces, you may bind your user to the `linkerd-linkerd-tap-admin` ClusterRole, installed by default: ```bash -$ kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin +kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin Name: linkerd-linkerd-viz-tap-admin Labels: component=tap linkerd.io/extension=viz @@ -109,7 +109,7 @@ kubectl create clusterrolebinding \ You can verify you now have tap access with: ```bash -$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) req id=3:0 proxy=in src=10.244.0.1:37392 dst=10.244.0.13:9996 tls=not_provided_by_remote :method=GET :authority=10.244.0.13:9996 :path=/ping ... ``` @@ -143,14 +143,14 @@ Because GCloud provides this additional level of access, there are cases where not. To validate this, check whether your GCloud user has Tap access: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces yes ``` And then validate whether your RBAC user has Tap access: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) no - no RBAC policy matched ``` @@ -187,14 +187,14 @@ privileges necessary to tap resources. 
To confirm: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web yes ``` This access is enabled via a `linkerd-linkerd-viz-web-admin` ClusterRoleBinding: ```bash -$ kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin +kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin Name: linkerd-linkerd-viz-web-admin Labels: component=web linkerd.io/extensions=viz @@ -227,6 +227,6 @@ kubectl delete clusterrolebindings/linkerd-linkerd-viz-web-admin To confirm: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web no ``` diff --git a/linkerd.io/content/2.17/tasks/troubleshooting.md b/linkerd.io/content/2.17/tasks/troubleshooting.md index a9efbc7ec1..cf72a5b982 100644 --- a/linkerd.io/content/2.17/tasks/troubleshooting.md +++ b/linkerd.io/content/2.17/tasks/troubleshooting.md @@ -230,7 +230,7 @@ Example failure: Ensure the Linkerd ClusterRoles exist: ```bash -$ kubectl get clusterroles | grep linkerd +kubectl get clusterroles | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -240,7 +240,7 @@ linkerd-policy 9d Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +kubectl auth can-i create clusterroles yes ``` @@ -257,7 +257,7 @@ Example failure: Ensure the Linkerd ClusterRoleBindings exist: ```bash -$ kubectl get clusterrolebindings | grep linkerd +kubectl get clusterrolebindings | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -267,7 +267,7 @@ linkerd-destination-policy 9d Also ensure you have permission to create ClusterRoleBindings: ```bash -$ 
kubectl auth can-i create clusterrolebindings +kubectl auth can-i create clusterrolebindings yes ``` @@ -284,7 +284,7 @@ Example failure: Ensure the Linkerd ServiceAccounts exist: ```bash -$ kubectl -n linkerd get serviceaccounts +kubectl -n linkerd get serviceaccounts NAME SECRETS AGE default 1 14m linkerd-destination 1 14m @@ -297,7 +297,7 @@ Also ensure you have permission to create ServiceAccounts in the Linkerd namespace: ```bash -$ kubectl -n linkerd auth can-i create serviceaccounts +kubectl -n linkerd auth can-i create serviceaccounts yes ``` @@ -314,7 +314,7 @@ Example failure: Ensure the Linkerd CRD exists: ```bash -$ kubectl get customresourcedefinitions +kubectl get customresourcedefinitions NAME CREATED AT serviceprofiles.linkerd.io 2019-04-25T21:47:31Z ``` @@ -322,7 +322,7 @@ serviceprofiles.linkerd.io 2019-04-25T21:47:31Z Also ensure you have permission to create CRDs: ```bash -$ kubectl auth can-i create customresourcedefinitions +kubectl auth can-i create customresourcedefinitions yes ``` @@ -339,14 +339,14 @@ Example failure: Ensure the Linkerd MutatingWebhookConfigurations exists: ```bash -$ kubectl get mutatingwebhookconfigurations | grep linkerd +kubectl get mutatingwebhookconfigurations | grep linkerd linkerd-proxy-injector-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create MutatingWebhookConfigurations: ```bash -$ kubectl auth can-i create mutatingwebhookconfigurations +kubectl auth can-i create mutatingwebhookconfigurations yes ``` @@ -363,14 +363,14 @@ Example failure: Ensure the Linkerd ValidatingWebhookConfiguration exists: ```bash -$ kubectl get validatingwebhookconfigurations | grep linkerd +kubectl get validatingwebhookconfigurations | grep linkerd linkerd-sp-validator-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create ValidatingWebhookConfigurations: ```bash -$ kubectl auth can-i create validatingwebhookconfigurations +kubectl auth can-i create 
validatingwebhookconfigurations yes ``` @@ -418,7 +418,7 @@ Example failure: Ensure the Linkerd ConfigMap exists: ```bash -$ kubectl -n linkerd get configmap/linkerd-config +kubectl -n linkerd get configmap/linkerd-config NAME DATA AGE linkerd-config 3 61m ``` @@ -426,7 +426,7 @@ linkerd-config 3 61m Also ensure you have permission to create ConfigMaps: ```bash -$ kubectl -n linkerd auth can-i create configmap +kubectl -n linkerd auth can-i create configmap yes ``` @@ -780,7 +780,7 @@ Example failure: Verify the state of the control plane pods with: ```bash -$ kubectl -n linkerd get po +kubectl -n linkerd get po NAME READY STATUS RESTARTS AGE linkerd-destination-5fd7b5d466-szgqm 2/2 Running 1 12m linkerd-identity-54df78c479-hbh5m 2/2 Running 0 12m @@ -862,7 +862,7 @@ Ensure you can connect to the Linkerd version check endpoint from the environment the `linkerd` cli is running: ```bash -$ curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" +curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" {"stable":"stable-2.1.0","edge":"edge-19.1.2"} ``` @@ -961,7 +961,7 @@ normally. Example failure: ```bash -$ linkerd check --proxy --namespace foo +linkerd check --proxy --namespace foo ... 
× data plane namespace exists The "foo" namespace does not exist @@ -1147,7 +1147,7 @@ Example error: Ensure that the linkerd-cni-config ConfigMap exists in the CNI namespace: ```bash -$ kubectl get cm linkerd-cni-config -n linkerd-cni +kubectl get cm linkerd-cni-config -n linkerd-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1155,7 +1155,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create ConfigMaps: ```bash -$ kubectl auth can-i create ConfigMaps +kubectl auth can-i create ConfigMaps yes ``` @@ -1172,7 +1172,7 @@ Example error: Ensure that the cluster role exists: ```bash -$ kubectl get clusterrole linkerd-cni +kubectl get clusterrole linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1180,7 +1180,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create ClusterRoles +kubectl auth can-i create ClusterRoles yes ``` @@ -1197,7 +1197,7 @@ Example error: Ensure that the cluster role binding exists: ```bash -$ kubectl get clusterrolebinding linkerd-cni +kubectl get clusterrolebinding linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1205,7 +1205,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create ClusterRoleBindings +kubectl auth can-i create ClusterRoleBindings yes ``` @@ -1222,7 +1222,7 @@ Example error: Ensure that the CNI service account exists in the CNI namespace: ```bash -$ kubectl get ServiceAccount linkerd-cni -n linkerd-cni +kubectl get ServiceAccount linkerd-cni -n linkerd-cni NAME SECRETS AGE linkerd-cni 1 45m ``` @@ -1230,7 +1230,7 @@ linkerd-cni 1 45m Also ensure you have permission to create ServiceAccount: ```bash -$ kubectl auth can-i create ServiceAccounts -n linkerd-cni +kubectl auth can-i create ServiceAccounts -n linkerd-cni yes ``` @@ -1247,7 +1247,7 @@ 
Example error: Ensure that the CNI daemonset exists in the CNI namespace: ```bash -$ kubectl get ds -n linkerd-cni +kubectl get ds -n linkerd-cni NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE linkerd-cni 1 1 1 1 1 beta.kubernetes.io/os=linux 14m ``` @@ -1255,7 +1255,7 @@ linkerd-cni 1 1 1 1 1 beta.kubernet Also ensure you have permission to create DaemonSets: ```bash -$ kubectl auth can-i create DaemonSets -n linkerd-cni +kubectl auth can-i create DaemonSets -n linkerd-cni yes ``` @@ -1272,7 +1272,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -$ kubectl get po -n linkerd-cn +kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s @@ -1282,7 +1282,7 @@ linkerd-cni-p5670 1/1 Running 0 9m25s Ensure that all pods have finished the deployment of the CNI config and binary: ```bash -$ kubectl logs linkerd-cni-rzp2q -n linkerd-cni +kubectl logs linkerd-cni-rzp2q -n linkerd-cni Wrote linkerd CNI binaries to /host/opt/cni/bin Created CNI config /host/etc/cni/net.d/10-kindnet.conflist Done configuring CNI. Sleep=true @@ -1310,7 +1310,7 @@ Make sure multicluster extension is correctly installed and that the `links.multicluster.linkerd.io` CRD is present. ```bash -$ kubectl get crds | grep multicluster +kubectl get crds | grep multicluster NAME CREATED AT links.multicluster.linkerd.io 2021-03-10T09:58:10Z ``` @@ -1400,7 +1400,7 @@ the rules section.
Expected rules for `linkerd-service-mirror-access-local-resources` cluster role: ```bash -$ kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml +kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml kind: ClusterRole metadata: labels: @@ -1433,7 +1433,7 @@ rules: Expected rules for `linkerd-service-mirror-read-remote-creds` role: ```bash -$ kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml +kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml kind: Role metadata: labels: @@ -1466,7 +1466,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the controller pod with: ```bash -$ kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror +kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror NAME READY STATUS RESTARTS AGE linkerd-service-mirror-7bb8ff5967-zg265 2/2 Running 0 50m ``` @@ -1584,7 +1584,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoles exist: ```bash -$ kubectl get clusterroles | grep linkerd-viz +kubectl get clusterroles | grep linkerd-viz linkerd-linkerd-viz-metrics-api 2021-01-26T18:02:17Z linkerd-linkerd-viz-prometheus 2021-01-26T18:02:17Z linkerd-linkerd-viz-tap 2021-01-26T18:02:17Z @@ -1595,7 +1595,7 @@ linkerd-linkerd-viz-web-check 2021-01-2 Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +kubectl auth can-i create clusterroles yes ``` @@ -1612,7 +1612,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoleBindings exist: ```bash -$ kubectl get clusterrolebindings | grep linkerd-viz +kubectl get clusterrolebindings | grep linkerd-viz linkerd-linkerd-viz-metrics-api ClusterRole/linkerd-linkerd-viz-metrics-api 18h linkerd-linkerd-viz-prometheus 
ClusterRole/linkerd-linkerd-viz-prometheus 18h linkerd-linkerd-viz-tap ClusterRole/linkerd-linkerd-viz-tap 18h @@ -1624,7 +1624,7 @@ linkerd-linkerd-viz-web-check ClusterRole/linkerd-linke Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create clusterrolebindings +kubectl auth can-i create clusterrolebindings yes ``` @@ -1713,7 +1713,7 @@ requirements in the cluster: Ensure all the linkerd-viz pods are injected ```bash -$ kubectl -n linkerd-viz get pods +kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1737,7 +1737,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-viz pods are running with 2/2 ```bash -$ kubectl -n linkerd-viz get pods +kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1920,7 +1920,7 @@ versions in sync by updating either the CLI or linkerd-jaeger as necessary. 
Ensure all the jaeger pods are injected ```bash -$ kubectl -n linkerd-jaeger get pods +kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE collector-69cc44dfbc-rhpfg 2/2 Running 0 11s jaeger-6f98d5c979-scqlq 2/2 Running 0 11s @@ -1941,7 +1941,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-jaeger pods are running with 2/2 ```bash -$ kubectl -n linkerd-jaeger get pods +kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE jaeger-injector-548684d74b-bcq5h 2/2 Running 0 5s collector-69cc44dfbc-wqf6s 2/2 Running 0 5s @@ -1990,7 +1990,7 @@ Ensure you can connect to the Linkerd Buoyant version check endpoint from the environment the `linkerd` cli is running: ```bash -$ curl https://buoyant.cloud/version.json +curl https://buoyant.cloud/version.json {"linkerd-buoyant":"v0.4.4"} ``` @@ -2055,7 +2055,7 @@ linkerd-buoyant install | kubectl apply -f - Ensure that the cluster role exists: ```bash -$ kubectl get clusterrole buoyant-cloud-agent +kubectl get clusterrole buoyant-cloud-agent NAME CREATED AT buoyant-cloud-agent 2020-11-13T00:59:50Z ``` @@ -2063,7 +2063,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create ClusterRoles +kubectl auth can-i create ClusterRoles yes ``` @@ -2078,7 +2078,7 @@ yes Ensure that the cluster role binding exists: ```bash -$ kubectl get clusterrolebinding buoyant-cloud-agent +kubectl get clusterrolebinding buoyant-cloud-agent NAME ROLE AGE buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d ``` @@ -2086,7 +2086,7 @@ buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create ClusterRoleBindings +kubectl auth can-i create ClusterRoleBindings yes ``` @@ -2101,7 +2101,7 @@ yes Ensure that the service account exists: ```bash -$ kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent 
+kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent NAME SECRETS AGE buoyant-cloud-agent 1 301d ``` @@ -2109,7 +2109,7 @@ buoyant-cloud-agent 1 301d Also ensure you have permission to create ServiceAccounts: ```bash -$ kubectl -n buoyant-cloud auth can-i create ServiceAccount +kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2124,7 +2124,7 @@ yes Ensure that the secret exists: ```bash -$ kubectl -n buoyant-cloud get secret buoyant-cloud-id +kubectl -n buoyant-cloud get secret buoyant-cloud-id NAME TYPE DATA AGE buoyant-cloud-id Opaque 4 301d ``` @@ -2132,7 +2132,7 @@ buoyant-cloud-id Opaque 4 301d Also ensure you have permission to create ServiceAccounts: ```bash -$ kubectl -n buoyant-cloud auth can-i create ServiceAccount +kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2170,7 +2170,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-agent` Deployment with: ```bash -$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent +kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 156m ``` @@ -2193,7 +2193,7 @@ Ensure the `buoyant-cloud-agent` pod is injected, the `READY` column should show `2/2`: ```bash -$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent +kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 161m ``` @@ -2212,7 +2212,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -$ linkerd-buoyant version +linkerd-buoyant version CLI version: v0.4.4 Agent version: v0.4.4 ``` @@ -2271,7 +2271,7 @@ everything to start up. 
If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-metrics` DaemonSet with: ```bash -$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics +kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 163m buoyant-cloud-metrics-q8jhj 2/2 Running 0 163m @@ -2297,7 +2297,7 @@ Ensure the `buoyant-cloud-metrics` pods are injected, the `READY` column should show `2/2`: ```bash -$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics +kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 166m buoyant-cloud-metrics-q8jhj 2/2 Running 0 166m @@ -2319,7 +2319,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' +kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' {"app.kubernetes.io/name":"metrics","app.kubernetes.io/part-of":"buoyant-cloud","app.kubernetes.io/version":"v0.4.4"} ``` diff --git a/linkerd.io/content/2.17/tasks/upgrade.md b/linkerd.io/content/2.17/tasks/upgrade.md index 23547217a4..a73f4d54fc 100644 --- a/linkerd.io/content/2.17/tasks/upgrade.md +++ b/linkerd.io/content/2.17/tasks/upgrade.md @@ -379,7 +379,7 @@ Find the release name you used for the `linkerd2` chart, and the namespace where this release stored its config: ```bash -$ helm ls -A +helm ls -A NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION linkerd default 1 2021-11-22 17:14:50.751436374 -0500 -05 deployed linkerd2-2.11.1 stable-2.11.1 ``` @@ -412,18 +412,18 @@ the `linkerd-crds`, `linkerd-control-plane` and `linkerd-smi` charts: ```bash # First migrate the CRDs -$ helm -n default get manifest linkerd | \ +helm -n default get manifest linkerd | \ yq 'select(.kind == 
"CustomResourceDefinition") | .metadata.name' | \ grep -v '\-\-\-' | \ xargs -n1 sh -c \ 'kubectl annotate --overwrite crd/$0 meta.helm.sh/release-name=linkerd-crds meta.helm.sh/release-namespace=linkerd' # Special case for TrafficSplit (only use if you have TrafficSplit CRs) -$ kubectl annotate --overwrite crd/trafficsplits.split.smi-spec.io \ +kubectl annotate --overwrite crd/trafficsplits.split.smi-spec.io \ meta.helm.sh/release-name=linkerd-smi meta.helm.sh/release-namespace=linkerd-smi # Now migrate all the other resources -$ helm -n default get manifest linkerd | \ +helm -n default get manifest linkerd | \ yq 'select(.kind != "CustomResourceDefinition")' | \ yq '.kind, .metadata.name, .metadata.namespace' | \ grep -v '\-\-\-' | @@ -437,14 +437,14 @@ above. ```bash # First make sure you update the helm repo -$ helm repo up +helm repo up # Install the linkerd-crds chart -$ helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds +helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds # Install the linkerd-control-plane chart # (remember to add any customizations you retrieved above) -$ helm install linkerd-control-plane \ +helm install linkerd-control-plane \ -n linkerd \ --set-file identityTrustAnchorsPEM=ca.crt \ --set-file identity.issuer.tls.crtPEM=issuer.crt \ @@ -452,8 +452,8 @@ $ helm install linkerd-control-plane \ linkerd/linkerd-control-plane # Optional: if using TrafficSplit CRs -$ helm repo add l5d-smi https://linkerd.github.io/linkerd-smi -$ helm install linkerd-smi -n linkerd-smi --create-namespace l5d-smi/linkerd-smi +helm repo add l5d-smi https://linkerd.github.io/linkerd-smi +helm install linkerd-smi -n linkerd-smi --create-namespace l5d-smi/linkerd-smi ``` ##### Cleaning up the old linkerd2 Helm release @@ -464,7 +464,7 @@ remove the Helm release config for the old `linkerd2` chart (assuming you used the "Secret" storage backend, which is the default): ```bash -$ kubectl -n default delete secret \ 
+kubectl -n default delete secret \ --field-selector type=helm.sh/release.v1 \ -l name=linkerd,owner=helm ``` diff --git a/linkerd.io/content/2.18/reference/cli/check.md b/linkerd.io/content/2.18/reference/cli/check.md index 7cd61cd237..67a2486908 100644 --- a/linkerd.io/content/2.18/reference/cli/check.md +++ b/linkerd.io/content/2.18/reference/cli/check.md @@ -12,7 +12,7 @@ for a full list of all the possible checks, what they do and how to fix them. ## Example output ```bash -$ linkerd check +linkerd check kubernetes-api -------------- √ can initialize the client diff --git a/linkerd.io/content/2.18/reference/iptables.md b/linkerd.io/content/2.18/reference/iptables.md index 67a7ea89de..9b4d229a59 100644 --- a/linkerd.io/content/2.18/reference/iptables.md +++ b/linkerd.io/content/2.18/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -$ kubectl -n logs linkerd-init +kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2.18/tasks/configuring-dynamic-request-routing.md b/linkerd.io/content/2.18/tasks/configuring-dynamic-request-routing.md index 004b50ded6..a44d12a1a5 100644 --- a/linkerd.io/content/2.18/tasks/configuring-dynamic-request-routing.md +++ b/linkerd.io/content/2.18/tasks/configuring-dynamic-request-routing.md @@ -67,7 +67,7 @@ Requests to `/echo` on port 9898 to the frontend pod will get forwarded the pod pointed by the Service `backend-a-podinfo`: ```bash -$ curl -sX POST localhost:9898/echo \ +curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' PODINFO_UI_MESSAGE=A backend @@ -132,7 +132,7 @@ the `backend-a-podinfo` Service. The previous requests should still reach `backend-a-podinfo` only: ```bash -$ curl -sX POST localhost:9898/echo \ +curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. 
backend' PODINFO_UI_MESSAGE=A backend @@ -142,7 +142,7 @@ But if we add the `x-request-id: alternative` header, they get routed to `backend-b-podinfo`: ```bash -$ curl -sX POST \ +curl -sX POST \ -H 'x-request-id: alternative' \ localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' diff --git a/linkerd.io/content/2.18/tasks/configuring-per-route-policy.md b/linkerd.io/content/2.18/tasks/configuring-per-route-policy.md index 011c10ff9e..98fb81a708 100644 --- a/linkerd.io/content/2.18/tasks/configuring-per-route-policy.md +++ b/linkerd.io/content/2.18/tasks/configuring-per-route-policy.md @@ -30,7 +30,7 @@ haven't already done this. Inject and install the Books demo application: ```bash -$ kubectl create ns booksapp && \ +kubectl create ns booksapp && \ curl --proto '=https' --tlsv1.2 -sSfL https://run.linkerd.io/booksapp.yml \ | linkerd inject - \ | kubectl -n booksapp apply -f - @@ -44,21 +44,21 @@ run in the `booksapp` namespace. Confirm that the Linkerd data plane was injected successfully: ```bash -$ linkerd check -n booksapp --proxy -o short +linkerd check -n booksapp --proxy -o short ``` You can take a quick look at all the components that were added to your cluster by running: ```bash -$ kubectl -n booksapp get all +kubectl -n booksapp get all ``` Once the rollout has completed successfully, you can access the app itself by port-forwarding `webapp` locally: ```bash -$ kubectl -n booksapp port-forward svc/webapp 7000 & +kubectl -n booksapp port-forward svc/webapp 7000 & ``` Open [http://localhost:7000/](http://localhost:7000/) in your browser to see the @@ -87,7 +87,7 @@ First, let's run the `linkerd viz authz` command to list the authorization resources that currently exist for the `authors` deployment: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated 
default/all-unauthenticated 0.0rps 70.31% 8.1rps 1ms 43ms 49ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.3rps 1ms 1ms 1ms @@ -124,7 +124,7 @@ Now that we've defined a [`Server`] for the authors `Deployment`, we can run the currently unauthorized: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default authors-server 9.5rps 0.00% 0.0rps 0ms 0ms 0ms probe authors-server default/probe 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -312,7 +312,7 @@ network (0.0.0.0). Running `linkerd viz authz` again, we can now see that our new policies exist: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 2ms 2ms 2ms authors-probe-route authors-server authorizationpolicy/authors-probe-policy 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -383,7 +383,7 @@ requests, but we haven't _authorized_ requests to that route. 
Running the requests to `authors-modify-route`: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy - - - - - - authors-modify-route authors-server 9.7rps 0.00% 0.0rps 0ms 0ms 0ms @@ -442,7 +442,7 @@ Running the `linkerd viz authz` command one last time, we now see that all traffic is authorized: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 0ms 0ms 0ms authors-modify-route authors-server authorizationpolicy/authors-modify-policy 0.0rps 100.00% 0.0rps 0ms 0ms 0ms diff --git a/linkerd.io/content/2.18/tasks/managing-egress-traffic.md b/linkerd.io/content/2.18/tasks/managing-egress-traffic.md index a43eadb61a..518c4f72f7 100644 --- a/linkerd.io/content/2.18/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2.18/tasks/managing-egress-traffic.md @@ -70,7 +70,7 @@ Now SSH into the client container and start generating some external traffic: ```bash kubectl -n egress-test exec -it client -c client -- sh -$ while sleep 1; do curl -s http://httpbin.org/get ; done +while sleep 1; do curl -s http://httpbin.org/get ; done ``` In a separate shell, you can use the Linkerd diagnostics command to visualize @@ -235,7 +235,7 @@ Interestingly enough though, if we go back to our client shell and we try to initiate HTTPS traffic to the same service, it will not be allowed: ```bash -~ $ curl -v https://httpbin.org/get +~ curl -v https://httpbin.org/get curl: (35) TLS connect error: error:00000000:lib(0)::reason(0) ``` @@ -458,7 +458,7 @@ Now let's verify all works as expected: ```bash # plaintext traffic goes as expected to the /get path 
-$ curl http://httpbin.org/get +curl http://httpbin.org/get { "args": {}, "headers": { @@ -472,14 +472,14 @@ $ curl http://httpbin.org/get } # encrypted traffic can target all paths and hosts -$ curl https://httpbin.org/ip +curl https://httpbin.org/ip { "origin": "51.116.126.217" } # arbitrary unencrypted traffic goes to the internal service -$ curl http://google.com +curl http://google.com { "requestUID": "in:http-sid:terminus-grpc:-1-h1:80-190120723", "payload": "You cannot go there right now"} diff --git a/linkerd.io/content/2.18/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.18/tasks/multicluster-using-statefulsets.md index 81969979a0..83c638a4ae 100644 --- a/linkerd.io/content/2.18/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.18/tasks/multicluster-using-statefulsets.md @@ -48,8 +48,8 @@ The first step is to clone the demo repository on your local machine. ```sh # clone example repository -$ git clone git@github.com:linkerd/l2d-k3d-statefulset.git -$ cd l2d-k3d-statefulset +git clone git@github.com:linkerd/l2d-k3d-statefulset.git +cd l2d-k3d-statefulset ``` The second step consists of creating two `k3d` clusters named `east` and `west`, @@ -60,10 +60,10 @@ everything. ```sh # create k3d clusters -$ ./create.sh +./create.sh # list the clusters -$ k3d cluster list +k3d cluster list NAME SERVERS AGENTS LOADBALANCER east 1/1 0/0 true west 1/1 0/0 true @@ -77,10 +77,10 @@ controllers and links are generated for both clusters. ```sh # Install Linkerd and multicluster, output to check should be a success -$ ./install.sh +./install.sh # Next, link the two clusters together -$ ./link.sh +./link.sh ``` Perfect! If you've made it this far with no errors, then it's a good sign. In @@ -100,17 +100,17 @@ communication. 
First, we will deploy our pods and services: ```sh # deploy services and mesh namespaces -$ ./deploy.sh +./deploy.sh # verify both clusters # # verify east -$ kubectl --context=k3d-east get pods +kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 7s # verify west has headless service -$ kubectl --context=k3d-west get services +kubectl --context=k3d-west get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 10m nginx-svc ClusterIP None 80/TCP 8s @@ -118,7 +118,7 @@ nginx-svc ClusterIP None 80/TCP 8s # verify west has statefulset # # this may take a while to come up -$ kubectl --context=k3d-west get pods +kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 53s nginx-set-1 2/2 Running 0 43s @@ -129,7 +129,7 @@ Before we go further, let's have a look at the endpoints object for the `nginx-svc`: ```sh -$ kubectl --context=k3d-west get endpoints nginx-svc -o yaml +kubectl --context=k3d-west get endpoints nginx-svc -o yaml ... subsets: - addresses: @@ -169,23 +169,23 @@ would get an answer back. We can test this out by applying the curl pod to the `west` cluster: ```sh -$ kubectl --context=k3d-west apply -f east/curl.yml -$ kubectl --context=k3d-west get pods +kubectl --context=k3d-west apply -f east/curl.yml +kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 5m8s nginx-set-1 2/2 Running 0 4m58s nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s -$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh -/$ # prompt for curl pod +kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh +/# prompt for curl pod ``` If we now curl one of these instances, we will get back a response. 
```sh # exec'd on the pod -/ $ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local +/ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local " @@ -217,10 +217,10 @@ Now, let's do the same, but this time from the `east` cluster. We will first export the service. ```sh -$ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" +kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled -$ kubectl --context=k3d-east get services +kubectl --context=k3d-east get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 20h nginx-svc-west ClusterIP None 80/TCP 29s @@ -234,7 +234,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname will point to one of the services we see above: ```sh -$ kubectl --context=k3d-east get endpoints nginx-svc-k3d-west -o yaml +kubectl --context=k3d-east get endpoints nginx-svc-k3d-west -o yaml subsets: - addresses: - hostname: nginx-set-0 @@ -250,17 +250,17 @@ cluster (`west`), will be mirrored as a clusterIP service. We will see in a second why this matters. ```sh -$ kubectl --context=k3d-east get pods +kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -$ kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh +kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. # -/ $ curl nginx-set-0.nginx-svc-k3d-west.default.svc.east.cluster.local +/ curl nginx-set-0.nginx-svc-k3d-west.default.svc.east.cluster.local @@ -328,8 +328,8 @@ validation. 
To clean-up, you can remove both clusters entirely using the k3d CLI: ```sh -$ k3d cluster delete east +k3d cluster delete east cluster east deleted -$ k3d cluster delete west +k3d cluster delete west cluster west deleted ``` diff --git a/linkerd.io/content/2.18/tasks/multicluster.md b/linkerd.io/content/2.18/tasks/multicluster.md index 3a80b3f3ed..2779b7616a 100644 --- a/linkerd.io/content/2.18/tasks/multicluster.md +++ b/linkerd.io/content/2.18/tasks/multicluster.md @@ -506,9 +506,9 @@ To cleanup the multicluster control plane, you can run: ```bash # Delete the link CR -$ kubectl --context=west -n linkerd-multicluster delete links east +kubectl --context=west -n linkerd-multicluster delete links east # Delete the test namespace and uninstall multicluster -$ for ctx in west east; do \ +for ctx in west east; do \ kubectl --context=${ctx} delete ns test; \ linkerd --context=${ctx} multicluster uninstall | kubectl --context=${ctx} delete -f - ; \ done diff --git a/linkerd.io/content/2.18/tasks/restricting-access.md b/linkerd.io/content/2.18/tasks/restricting-access.md index 5654518600..c9850725f7 100644 --- a/linkerd.io/content/2.18/tasks/restricting-access.md +++ b/linkerd.io/content/2.18/tasks/restricting-access.md @@ -21,9 +21,9 @@ haven't already done this. Inject and install the Emojivoto application: ```bash -$ linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - +linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - ... -$ linkerd check -n emojivoto --proxy -o short +linkerd check -n emojivoto --proxy -o short ... 
``` diff --git a/linkerd.io/content/2.18/tasks/securing-linkerd-tap.md b/linkerd.io/content/2.18/tasks/securing-linkerd-tap.md index 8a802c890c..639f81692f 100644 --- a/linkerd.io/content/2.18/tasks/securing-linkerd-tap.md +++ b/linkerd.io/content/2.18/tasks/securing-linkerd-tap.md @@ -60,7 +60,7 @@ kubectl auth can-i watch deployments.tap.linkerd.io -n emojivoto --as $(whoami) You can also use the Linkerd CLI's `--as` flag to confirm: ```bash -$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) Cannot connect to Linkerd Viz: namespaces is forbidden: User "XXXX" cannot list resource "namespaces" in API group "" at the cluster scope Validate the install with: linkerd viz check ... @@ -77,7 +77,7 @@ To enable tap access to all resources in all namespaces, you may bind your user to the `linkerd-linkerd-tap-admin` ClusterRole, installed by default: ```bash -$ kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin +kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin Name: linkerd-linkerd-viz-tap-admin Labels: component=tap linkerd.io/extension=viz @@ -109,7 +109,7 @@ kubectl create clusterrolebinding \ You can verify you now have tap access with: ```bash -$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) req id=3:0 proxy=in src=10.244.0.1:37392 dst=10.244.0.13:9996 tls=not_provided_by_remote :method=GET :authority=10.244.0.13:9996 :path=/ping ... ``` @@ -143,14 +143,14 @@ Because GCloud provides this additional level of access, there are cases where not. 
To validate this, check whether your GCloud user has Tap access: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces yes ``` And then validate whether your RBAC user has Tap access: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) no - no RBAC policy matched ``` @@ -187,14 +187,14 @@ privileges necessary to tap resources. To confirm: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web yes ``` This access is enabled via a `linkerd-linkerd-viz-web-admin` ClusterRoleBinding: ```bash -$ kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin +kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin Name: linkerd-linkerd-viz-web-admin Labels: component=web linkerd.io/extensions=viz @@ -227,6 +227,6 @@ kubectl delete clusterrolebindings/linkerd-linkerd-viz-web-admin To confirm: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web no ``` diff --git a/linkerd.io/content/2.18/tasks/troubleshooting.md b/linkerd.io/content/2.18/tasks/troubleshooting.md index ca2b5b104d..396b0dcda9 100644 --- a/linkerd.io/content/2.18/tasks/troubleshooting.md +++ b/linkerd.io/content/2.18/tasks/troubleshooting.md @@ -230,7 +230,7 @@ Example failure: Ensure the Linkerd ClusterRoles exist: ```bash -$ kubectl get clusterroles | grep linkerd +kubectl get clusterroles | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -240,7 +240,7 @@ 
linkerd-policy 9d Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +kubectl auth can-i create clusterroles yes ``` @@ -257,7 +257,7 @@ Example failure: Ensure the Linkerd ClusterRoleBindings exist: ```bash -$ kubectl get clusterrolebindings | grep linkerd +kubectl get clusterrolebindings | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -267,7 +267,7 @@ linkerd-destination-policy 9d Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create clusterrolebindings +kubectl auth can-i create clusterrolebindings yes ``` @@ -284,7 +284,7 @@ Example failure: Ensure the Linkerd ServiceAccounts exist: ```bash -$ kubectl -n linkerd get serviceaccounts +kubectl -n linkerd get serviceaccounts NAME SECRETS AGE default 1 14m linkerd-destination 1 14m @@ -297,7 +297,7 @@ Also ensure you have permission to create ServiceAccounts in the Linkerd namespace: ```bash -$ kubectl -n linkerd auth can-i create serviceaccounts +kubectl -n linkerd auth can-i create serviceaccounts yes ``` @@ -314,7 +314,7 @@ Example failure: Ensure the Linkerd CRD exists: ```bash -$ kubectl get customresourcedefinitions +kubectl get customresourcedefinitions NAME CREATED AT serviceprofiles.linkerd.io 2019-04-25T21:47:31Z ``` @@ -322,7 +322,7 @@ serviceprofiles.linkerd.io 2019-04-25T21:47:31Z Also ensure you have permission to create CRDs: ```bash -$ kubectl auth can-i create customresourcedefinitions +kubectl auth can-i create customresourcedefinitions yes ``` @@ -339,14 +339,14 @@ Example failure: Ensure the Linkerd MutatingWebhookConfigurations exists: ```bash -$ kubectl get mutatingwebhookconfigurations | grep linkerd +kubectl get mutatingwebhookconfigurations | grep linkerd linkerd-proxy-injector-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create MutatingWebhookConfigurations: ```bash -$ kubectl auth can-i create 
mutatingwebhookconfigurations +kubectl auth can-i create mutatingwebhookconfigurations yes ``` @@ -363,14 +363,14 @@ Example failure: Ensure the Linkerd ValidatingWebhookConfiguration exists: ```bash -$ kubectl get validatingwebhookconfigurations | grep linkerd +kubectl get validatingwebhookconfigurations | grep linkerd linkerd-sp-validator-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create ValidatingWebhookConfigurations: ```bash -$ kubectl auth can-i create validatingwebhookconfigurations +kubectl auth can-i create validatingwebhookconfigurations yes ``` @@ -418,7 +418,7 @@ Example failure: Ensure the Linkerd ConfigMap exists: ```bash -$ kubectl -n linkerd get configmap/linkerd-config +kubectl -n linkerd get configmap/linkerd-config NAME DATA AGE linkerd-config 3 61m ``` @@ -426,7 +426,7 @@ linkerd-config 3 61m Also ensure you have permission to create ConfigMaps: ```bash -$ kubectl -n linkerd auth can-i create configmap +kubectl -n linkerd auth can-i create configmap yes ``` @@ -780,7 +780,7 @@ Example failure: Verify the state of the control plane pods with: ```bash -$ kubectl -n linkerd get po +kubectl -n linkerd get po NAME READY STATUS RESTARTS AGE linkerd-destination-5fd7b5d466-szgqm 2/2 Running 1 12m linkerd-identity-54df78c479-hbh5m 2/2 Running 0 12m @@ -862,7 +862,7 @@ Ensure you can connect to the Linkerd version check endpoint from the environment the `linkerd` cli is running: ```bash -$ curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" +curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" {"stable":"stable-2.1.0","edge":"edge-19.1.2"} ``` @@ -961,7 +961,7 @@ normally. Example failure: ```bash -$ linkerd check --proxy --namespace foo +linkerd check --proxy --namespace foo ... 
× data plane namespace exists The "foo" namespace does not exist @@ -1147,7 +1147,7 @@ Example error: Ensure that the linkerd-cni-config ConfigMap exists in the CNI namespace: ```bash -$ kubectl get cm linkerd-cni-config -n linkerd-cni +kubectl get cm linkerd-cni-config -n linkerd-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1155,7 +1155,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create ConfigMaps: ```bash -$ kubectl auth can-i create ConfigMaps +kubectl auth can-i create ConfigMaps yes ``` @@ -1172,7 +1172,7 @@ Example error: Ensure that the cluster role exists: ```bash -$ kubectl get clusterrole linkerd-cni +kubectl get clusterrole linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1180,7 +1180,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create ClusterRoles +kubectl auth can-i create ClusterRoles yes ``` @@ -1197,7 +1197,7 @@ Example error: Ensure that the cluster role binding exists: ```bash -$ kubectl get clusterrolebinding linkerd-cni +kubectl get clusterrolebinding linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1205,7 +1205,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create ClusterRoleBindings +kubectl auth can-i create ClusterRoleBindings yes ``` @@ -1222,7 +1222,7 @@ Example error: Ensure that the CNI service account exists in the CNI namespace: ```bash -$ kubectl get ServiceAccount linkerd-cni -n linkerd-cni +kubectl get ServiceAccount linkerd-cni -n linkerd-cni NAME SECRETS AGE linkerd-cni 1 45m ``` @@ -1230,7 +1230,7 @@ linkerd-cni 1 45m Also ensure you have permission to create ServiceAccount: ```bash -$ kubectl auth can-i create ServiceAccounts -n linkerd-cni +kubectl auth can-i create ServiceAccounts -n linkerd-cni yes ``` @@ -1247,7 +1247,7 @@ 
Example error: Ensure that the CNI daemonset exists in the CNI namespace: ```bash -$ kubectl get ds -n linkerd-cni +kubectl get ds -n linkerd-cni NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE linkerd-cni 1 1 1 1 1 beta.kubernetes.io/os=linux 14m ``` @@ -1255,7 +1255,7 @@ linkerd-cni 1 1 1 1 1 beta.kubernet Also ensure you have permission to create DaemonSets: ```bash -$ kubectl auth can-i create DaemonSets -n linkerd-cni +kubectl auth can-i create DaemonSets -n linkerd-cni yes ``` @@ -1272,7 +1272,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -$ kubectl get po -n linkerd-cn +kubectl get po -n linkerd-cn NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s @@ -1282,7 +1282,7 @@ linkerd-cni-p5670 1/1 Running 0 9m25s Ensure that all pods have finished the deployment of the CNI config and binary: ```bash -$ kubectl logs linkerd-cni-rzp2q -n linkerd-cni +kubectl logs linkerd-cni-rzp2q -n linkerd-cni Wrote linkerd CNI binaries to /host/opt/cni/bin Created CNI config /host/etc/cni/net.d/10-kindnet.conflist Done configuring CNI. Sleep=true @@ -1310,7 +1310,7 @@ Make sure multicluster extension is correctly installed and that the `links.multicluster.linkerd.io` CRD is present. ```bash -$ kubectl get crds | grep multicluster +kubectl get crds | grep multicluster NAME CREATED AT links.multicluster.linkerd.io 2021-03-10T09:58:10Z ``` @@ -1400,7 +1400,7 @@ the rules section. 
Expected rules for `linkerd-service-mirror-access-local-resources` cluster role: ```bash -$ kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml +kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml kind: ClusterRole metadata: labels: @@ -1433,7 +1433,7 @@ rules: Expected rules for `linkerd-service-mirror-read-remote-creds` role: ```bash -$ kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml +kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml kind: Role metadata: labels: @@ -1466,7 +1466,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the controller pod with: ```bash -$ kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror +kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror NAME READY STATUS RESTARTS AGE linkerd-service-mirror-7bb8ff5967-zg265 2/2 Running 0 50m ``` @@ -1612,7 +1612,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoles exist: ```bash -$ kubectl get clusterroles | grep linkerd-viz +kubectl get clusterroles | grep linkerd-viz linkerd-linkerd-viz-metrics-api 2021-01-26T18:02:17Z linkerd-linkerd-viz-prometheus 2021-01-26T18:02:17Z linkerd-linkerd-viz-tap 2021-01-26T18:02:17Z @@ -1623,7 +1623,7 @@ linkerd-linkerd-viz-web-check 2021-01-2 Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +kubectl auth can-i create clusterroles yes ``` @@ -1640,7 +1640,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoleBindings exist: ```bash -$ kubectl get clusterrolebindings | grep linkerd-viz +kubectl get clusterrolebindings | grep linkerd-viz linkerd-linkerd-viz-metrics-api ClusterRole/linkerd-linkerd-viz-metrics-api 18h linkerd-linkerd-viz-prometheus 
ClusterRole/linkerd-linkerd-viz-prometheus 18h linkerd-linkerd-viz-tap ClusterRole/linkerd-linkerd-viz-tap 18h @@ -1652,7 +1652,7 @@ linkerd-linkerd-viz-web-check ClusterRole/linkerd-linke Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create clusterrolebindings +kubectl auth can-i create clusterrolebindings yes ``` @@ -1741,7 +1741,7 @@ requirements in the cluster: Ensure all the linkerd-viz pods are injected ```bash -$ kubectl -n linkerd-viz get pods +kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1765,7 +1765,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-viz pods are running with 2/2 ```bash -$ kubectl -n linkerd-viz get pods +kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1948,7 +1948,7 @@ versions in sync by updating either the CLI or linkerd-jaeger as necessary. 
Ensure all the jaeger pods are injected ```bash -$ kubectl -n linkerd-jaeger get pods +kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE collector-69cc44dfbc-rhpfg 2/2 Running 0 11s jaeger-6f98d5c979-scqlq 2/2 Running 0 11s @@ -1969,7 +1969,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-jaeger pods are running with 2/2 ```bash -$ kubectl -n linkerd-jaeger get pods +kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE jaeger-injector-548684d74b-bcq5h 2/2 Running 0 5s collector-69cc44dfbc-wqf6s 2/2 Running 0 5s @@ -2018,7 +2018,7 @@ Ensure you can connect to the Linkerd Buoyant version check endpoint from the environment the `linkerd` cli is running: ```bash -$ curl https://buoyant.cloud/version.json +curl https://buoyant.cloud/version.json {"linkerd-buoyant":"v0.4.4"} ``` @@ -2083,7 +2083,7 @@ linkerd-buoyant install | kubectl apply -f - Ensure that the cluster role exists: ```bash -$ kubectl get clusterrole buoyant-cloud-agent +kubectl get clusterrole buoyant-cloud-agent NAME CREATED AT buoyant-cloud-agent 2020-11-13T00:59:50Z ``` @@ -2091,7 +2091,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create ClusterRoles +kubectl auth can-i create ClusterRoles yes ``` @@ -2106,7 +2106,7 @@ yes Ensure that the cluster role binding exists: ```bash -$ kubectl get clusterrolebinding buoyant-cloud-agent +kubectl get clusterrolebinding buoyant-cloud-agent NAME ROLE AGE buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d ``` @@ -2114,7 +2114,7 @@ buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create ClusterRoleBindings +kubectl auth can-i create ClusterRoleBindings yes ``` @@ -2129,7 +2129,7 @@ yes Ensure that the service account exists: ```bash -$ kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent 
+kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent NAME SECRETS AGE buoyant-cloud-agent 1 301d ``` @@ -2137,7 +2137,7 @@ buoyant-cloud-agent 1 301d Also ensure you have permission to create ServiceAccounts: ```bash -$ kubectl -n buoyant-cloud auth can-i create ServiceAccount +kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2152,7 +2152,7 @@ yes Ensure that the secret exists: ```bash -$ kubectl -n buoyant-cloud get secret buoyant-cloud-id +kubectl -n buoyant-cloud get secret buoyant-cloud-id NAME TYPE DATA AGE buoyant-cloud-id Opaque 4 301d ``` @@ -2160,7 +2160,7 @@ buoyant-cloud-id Opaque 4 301d Also ensure you have permission to create ServiceAccounts: ```bash -$ kubectl -n buoyant-cloud auth can-i create ServiceAccount +kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2198,7 +2198,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-agent` Deployment with: ```bash -$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent +kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 156m ``` @@ -2221,7 +2221,7 @@ Ensure the `buoyant-cloud-agent` pod is injected, the `READY` column should show `2/2`: ```bash -$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent +kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 161m ``` @@ -2240,7 +2240,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -$ linkerd-buoyant version +linkerd-buoyant version CLI version: v0.4.4 Agent version: v0.4.4 ``` @@ -2299,7 +2299,7 @@ everything to start up. 
If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-metrics` DaemonSet with: ```bash -$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics +kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 163m buoyant-cloud-metrics-q8jhj 2/2 Running 0 163m @@ -2325,7 +2325,7 @@ Ensure the `buoyant-cloud-metrics` pods are injected, the `READY` column should show `2/2`: ```bash -$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics +kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 166m buoyant-cloud-metrics-q8jhj 2/2 Running 0 166m @@ -2347,7 +2347,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' +kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' {"app.kubernetes.io/name":"metrics","app.kubernetes.io/part-of":"buoyant-cloud","app.kubernetes.io/version":"v0.4.4"} ``` diff --git a/linkerd.io/content/2.19/reference/cli/check.md b/linkerd.io/content/2.19/reference/cli/check.md index 7cd61cd237..67a2486908 100644 --- a/linkerd.io/content/2.19/reference/cli/check.md +++ b/linkerd.io/content/2.19/reference/cli/check.md @@ -12,7 +12,7 @@ for a full list of all the possible checks, what they do and how to fix them. 
## Example output ```bash -$ linkerd check +linkerd check kubernetes-api -------------- √ can initialize the client diff --git a/linkerd.io/content/2.19/reference/iptables.md b/linkerd.io/content/2.19/reference/iptables.md index 67a7ea89de..9b4d229a59 100644 --- a/linkerd.io/content/2.19/reference/iptables.md +++ b/linkerd.io/content/2.19/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -$ kubectl -n <namespace> logs <pod-name> linkerd-init +kubectl -n <namespace> logs <pod-name> linkerd-init # where <pod-name> is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2.19/tasks/configuring-dynamic-request-routing.md b/linkerd.io/content/2.19/tasks/configuring-dynamic-request-routing.md index 004b50ded6..a44d12a1a5 100644 --- a/linkerd.io/content/2.19/tasks/configuring-dynamic-request-routing.md +++ b/linkerd.io/content/2.19/tasks/configuring-dynamic-request-routing.md @@ -67,7 +67,7 @@ Requests to `/echo` on port 9898 to the frontend pod will get forwarded the pod pointed by the Service `backend-a-podinfo`: ```bash -$ curl -sX POST localhost:9898/echo \ +curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' PODINFO_UI_MESSAGE=A backend @@ -132,7 +132,7 @@ the `backend-a-podinfo` Service. The previous requests should still reach `backend-a-podinfo` only: ```bash -$ curl -sX POST localhost:9898/echo \ +curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' PODINFO_UI_MESSAGE=A backend @@ -142,7 +142,7 @@ But if we add the `x-request-id: alternative` header, they get routed to `backend-b-podinfo`: ```bash -$ curl -sX POST \ +curl -sX POST \ -H 'x-request-id: alternative' \ localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. 
backend' diff --git a/linkerd.io/content/2.19/tasks/configuring-per-route-policy.md b/linkerd.io/content/2.19/tasks/configuring-per-route-policy.md index 011c10ff9e..98fb81a708 100644 --- a/linkerd.io/content/2.19/tasks/configuring-per-route-policy.md +++ b/linkerd.io/content/2.19/tasks/configuring-per-route-policy.md @@ -30,7 +30,7 @@ haven't already done this. Inject and install the Books demo application: ```bash -$ kubectl create ns booksapp && \ +kubectl create ns booksapp && \ curl --proto '=https' --tlsv1.2 -sSfL https://run.linkerd.io/booksapp.yml \ | linkerd inject - \ | kubectl -n booksapp apply -f - @@ -44,21 +44,21 @@ run in the `booksapp` namespace. Confirm that the Linkerd data plane was injected successfully: ```bash -$ linkerd check -n booksapp --proxy -o short +linkerd check -n booksapp --proxy -o short ``` You can take a quick look at all the components that were added to your cluster by running: ```bash -$ kubectl -n booksapp get all +kubectl -n booksapp get all ``` Once the rollout has completed successfully, you can access the app itself by port-forwarding `webapp` locally: ```bash -$ kubectl -n booksapp port-forward svc/webapp 7000 & +kubectl -n booksapp port-forward svc/webapp 7000 & ``` Open [http://localhost:7000/](http://localhost:7000/) in your browser to see the @@ -87,7 +87,7 @@ First, let's run the `linkerd viz authz` command to list the authorization resources that currently exist for the `authors` deployment: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated default/all-unauthenticated 0.0rps 70.31% 8.1rps 1ms 43ms 49ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.3rps 1ms 1ms 1ms @@ -124,7 +124,7 @@ Now that we've defined a [`Server`] for the authors `Deployment`, we can run the currently unauthorized: ```bash -$ linkerd viz authz -n 
booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default authors-server 9.5rps 0.00% 0.0rps 0ms 0ms 0ms probe authors-server default/probe 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -312,7 +312,7 @@ network (0.0.0.0). Running `linkerd viz authz` again, we can now see that our new policies exist: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 2ms 2ms 2ms authors-probe-route authors-server authorizationpolicy/authors-probe-policy 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -383,7 +383,7 @@ requests, but we haven't _authorized_ requests to that route. Running the requests to `authors-modify-route`: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy - - - - - - authors-modify-route authors-server 9.7rps 0.00% 0.0rps 0ms 0ms 0ms @@ -442,7 +442,7 @@ Running the `linkerd viz authz` command one last time, we now see that all traffic is authorized: ```bash -$ linkerd viz authz -n booksapp deploy/authors +linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 0ms 0ms 0ms authors-modify-route authors-server authorizationpolicy/authors-modify-policy 0.0rps 100.00% 0.0rps 0ms 0ms 0ms diff --git a/linkerd.io/content/2.19/tasks/managing-egress-traffic.md b/linkerd.io/content/2.19/tasks/managing-egress-traffic.md index a43eadb61a..518c4f72f7 100644 --- 
a/linkerd.io/content/2.19/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2.19/tasks/managing-egress-traffic.md @@ -70,7 +70,7 @@ Now SSH into the client container and start generating some external traffic: ```bash kubectl -n egress-test exec -it client -c client -- sh -$ while sleep 1; do curl -s http://httpbin.org/get ; done +while sleep 1; do curl -s http://httpbin.org/get ; done ``` In a separate shell, you can use the Linkerd diagnostics command to visualize @@ -235,7 +235,7 @@ Interestingly enough though, if we go back to our client shell and we try to initiate HTTPS traffic to the same service, it will not be allowed: ```bash -~ $ curl -v https://httpbin.org/get +curl -v https://httpbin.org/get curl: (35) TLS connect error: error:00000000:lib(0)::reason(0) ``` @@ -458,7 +458,7 @@ Now let's verify all works as expected: ```bash # plaintext traffic goes as expected to the /get path -$ curl http://httpbin.org/get +curl http://httpbin.org/get { "args": {}, "headers": { @@ -472,14 +472,14 @@ $ curl http://httpbin.org/get } # encrypted traffic can target all paths and hosts -$ curl https://httpbin.org/ip +curl https://httpbin.org/ip { "origin": "51.116.126.217" } # arbitrary unencrypted traffic goes to the internal service -$ curl http://google.com +curl http://google.com { "requestUID": "in:http-sid:terminus-grpc:-1-h1:80-190120723", "payload": "You cannot go there right now"} diff --git a/linkerd.io/content/2.19/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.19/tasks/multicluster-using-statefulsets.md index 81969979a0..83c638a4ae 100644 --- a/linkerd.io/content/2.19/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.19/tasks/multicluster-using-statefulsets.md @@ -48,8 +48,8 @@ The first step is to clone the demo repository on your local machine. 
```sh # clone example repository -$ git clone git@github.com:linkerd/l2d-k3d-statefulset.git -$ cd l2d-k3d-statefulset +git clone git@github.com:linkerd/l2d-k3d-statefulset.git +cd l2d-k3d-statefulset ``` The second step consists of creating two `k3d` clusters named `east` and `west`, @@ -60,10 +60,10 @@ everything. ```sh # create k3d clusters -$ ./create.sh +./create.sh # list the clusters -$ k3d cluster list +k3d cluster list NAME SERVERS AGENTS LOADBALANCER east 1/1 0/0 true west 1/1 0/0 true @@ -77,10 +77,10 @@ controllers and links are generated for both clusters. ```sh # Install Linkerd and multicluster, output to check should be a success -$ ./install.sh +./install.sh # Next, link the two clusters together -$ ./link.sh +./link.sh ``` Perfect! If you've made it this far with no errors, then it's a good sign. In @@ -100,17 +100,17 @@ communication. First, we will deploy our pods and services: ```sh # deploy services and mesh namespaces -$ ./deploy.sh +./deploy.sh # verify both clusters # # verify east -$ kubectl --context=k3d-east get pods +kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 7s # verify west has headless service -$ kubectl --context=k3d-west get services +kubectl --context=k3d-west get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 10m nginx-svc ClusterIP None 80/TCP 8s @@ -118,7 +118,7 @@ nginx-svc ClusterIP None 80/TCP 8s # verify west has statefulset # # this may take a while to come up -$ kubectl --context=k3d-west get pods +kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 53s nginx-set-1 2/2 Running 0 43s @@ -129,7 +129,7 @@ Before we go further, let's have a look at the endpoints object for the `nginx-svc`: ```sh -$ kubectl --context=k3d-west get endpoints nginx-svc -o yaml +kubectl --context=k3d-west get endpoints nginx-svc -o yaml ... 
subsets: - addresses: @@ -169,23 +169,23 @@ would get an answer back. We can test this out by applying the curl pod to the `west` cluster: ```sh -$ kubectl --context=k3d-west apply -f east/curl.yml -$ kubectl --context=k3d-west get pods +kubectl --context=k3d-west apply -f east/curl.yml +kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 5m8s nginx-set-1 2/2 Running 0 4m58s nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s -$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh -/$ # prompt for curl pod +kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh +# prompt for curl pod ``` If we now curl one of these instances, we will get back a response. ```sh # exec'd on the pod -/ $ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local +curl nginx-set-0.nginx-svc.default.svc.west.cluster.local " @@ -217,10 +217,10 @@ Now, let's do the same, but this time from the `east` cluster. We will first export the service. ```sh -$ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" +kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled -$ kubectl --context=k3d-east get services +kubectl --context=k3d-east get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 20h nginx-svc-west ClusterIP None 80/TCP 29s @@ -234,7 +234,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname will point to one of the services we see above: ```sh -$ kubectl --context=k3d-east get endpoints nginx-svc-k3d-west -o yaml +kubectl --context=k3d-east get endpoints nginx-svc-k3d-west -o yaml subsets: - addresses: - hostname: nginx-set-0 @@ -250,17 +250,17 @@ cluster (`west`), will be mirrored as a clusterIP service. We will see in a second why this matters. 
```sh -$ kubectl --context=k3d-east get pods +kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -$ kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh +kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. # -/ $ curl nginx-set-0.nginx-svc-k3d-west.default.svc.east.cluster.local +curl nginx-set-0.nginx-svc-k3d-west.default.svc.east.cluster.local @@ -328,8 +328,8 @@ validation. To clean-up, you can remove both clusters entirely using the k3d CLI: ```sh -$ k3d cluster delete east +k3d cluster delete east cluster east deleted -$ k3d cluster delete west +k3d cluster delete west cluster west deleted ``` diff --git a/linkerd.io/content/2.19/tasks/multicluster.md b/linkerd.io/content/2.19/tasks/multicluster.md index 3a80b3f3ed..2779b7616a 100644 --- a/linkerd.io/content/2.19/tasks/multicluster.md +++ b/linkerd.io/content/2.19/tasks/multicluster.md @@ -506,9 +506,9 @@ To cleanup the multicluster control plane, you can run: ```bash # Delete the link CR -$ kubectl --context=west -n linkerd-multicluster delete links east +kubectl --context=west -n linkerd-multicluster delete links east # Delete the test namespace and uninstall multicluster -$ for ctx in west east; do \ +for ctx in west east; do \ kubectl --context=${ctx} delete ns test; \ linkerd --context=${ctx} multicluster uninstall | kubectl --context=${ctx} delete -f - ; \ done diff --git a/linkerd.io/content/2.19/tasks/restricting-access.md b/linkerd.io/content/2.19/tasks/restricting-access.md index 5654518600..c9850725f7 100644 --- a/linkerd.io/content/2.19/tasks/restricting-access.md +++ b/linkerd.io/content/2.19/tasks/restricting-access.md @@ -21,9 +21,9 @@ haven't already done this. 
Inject and install the Emojivoto application: ```bash -$ linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - +linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - ... -$ linkerd check -n emojivoto --proxy -o short +linkerd check -n emojivoto --proxy -o short ... ``` diff --git a/linkerd.io/content/2.19/tasks/securing-linkerd-tap.md b/linkerd.io/content/2.19/tasks/securing-linkerd-tap.md index 8a802c890c..639f81692f 100644 --- a/linkerd.io/content/2.19/tasks/securing-linkerd-tap.md +++ b/linkerd.io/content/2.19/tasks/securing-linkerd-tap.md @@ -60,7 +60,7 @@ kubectl auth can-i watch deployments.tap.linkerd.io -n emojivoto --as $(whoami) You can also use the Linkerd CLI's `--as` flag to confirm: ```bash -$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) Cannot connect to Linkerd Viz: namespaces is forbidden: User "XXXX" cannot list resource "namespaces" in API group "" at the cluster scope Validate the install with: linkerd viz check ... @@ -77,7 +77,7 @@ To enable tap access to all resources in all namespaces, you may bind your user to the `linkerd-linkerd-tap-admin` ClusterRole, installed by default: ```bash -$ kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin +kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin Name: linkerd-linkerd-viz-tap-admin Labels: component=tap linkerd.io/extension=viz @@ -109,7 +109,7 @@ kubectl create clusterrolebinding \ You can verify you now have tap access with: ```bash -$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) req id=3:0 proxy=in src=10.244.0.1:37392 dst=10.244.0.13:9996 tls=not_provided_by_remote :method=GET :authority=10.244.0.13:9996 :path=/ping ... ``` @@ -143,14 +143,14 @@ Because GCloud provides this additional level of access, there are cases where not. 
To validate this, check whether your GCloud user has Tap access: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces yes ``` And then validate whether your RBAC user has Tap access: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) no - no RBAC policy matched ``` @@ -187,14 +187,14 @@ privileges necessary to tap resources. To confirm: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web yes ``` This access is enabled via a `linkerd-linkerd-viz-web-admin` ClusterRoleBinding: ```bash -$ kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin +kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin Name: linkerd-linkerd-viz-web-admin Labels: component=web linkerd.io/extensions=viz @@ -227,6 +227,6 @@ kubectl delete clusterrolebindings/linkerd-linkerd-viz-web-admin To confirm: ```bash -$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web no ``` diff --git a/linkerd.io/content/2.19/tasks/troubleshooting.md b/linkerd.io/content/2.19/tasks/troubleshooting.md index baaa71e206..5586441c26 100644 --- a/linkerd.io/content/2.19/tasks/troubleshooting.md +++ b/linkerd.io/content/2.19/tasks/troubleshooting.md @@ -230,7 +230,7 @@ Example failure: Ensure the Linkerd ClusterRoles exist: ```bash -$ kubectl get clusterroles | grep linkerd +kubectl get clusterroles | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -240,7 +240,7 @@ 
linkerd-policy 9d Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +kubectl auth can-i create clusterroles yes ``` @@ -257,7 +257,7 @@ Example failure: Ensure the Linkerd ClusterRoleBindings exist: ```bash -$ kubectl get clusterrolebindings | grep linkerd +kubectl get clusterrolebindings | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -267,7 +267,7 @@ linkerd-destination-policy 9d Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create clusterrolebindings +kubectl auth can-i create clusterrolebindings yes ``` @@ -284,7 +284,7 @@ Example failure: Ensure the Linkerd ServiceAccounts exist: ```bash -$ kubectl -n linkerd get serviceaccounts +kubectl -n linkerd get serviceaccounts NAME SECRETS AGE default 1 14m linkerd-destination 1 14m @@ -297,7 +297,7 @@ Also ensure you have permission to create ServiceAccounts in the Linkerd namespace: ```bash -$ kubectl -n linkerd auth can-i create serviceaccounts +kubectl -n linkerd auth can-i create serviceaccounts yes ``` @@ -314,7 +314,7 @@ Example failure: Ensure the Linkerd CRD exists: ```bash -$ kubectl get customresourcedefinitions +kubectl get customresourcedefinitions NAME CREATED AT serviceprofiles.linkerd.io 2019-04-25T21:47:31Z ``` @@ -322,7 +322,7 @@ serviceprofiles.linkerd.io 2019-04-25T21:47:31Z Also ensure you have permission to create CRDs: ```bash -$ kubectl auth can-i create customresourcedefinitions +kubectl auth can-i create customresourcedefinitions yes ``` @@ -339,14 +339,14 @@ Example failure: Ensure the Linkerd MutatingWebhookConfigurations exists: ```bash -$ kubectl get mutatingwebhookconfigurations | grep linkerd +kubectl get mutatingwebhookconfigurations | grep linkerd linkerd-proxy-injector-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create MutatingWebhookConfigurations: ```bash -$ kubectl auth can-i create 
mutatingwebhookconfigurations +kubectl auth can-i create mutatingwebhookconfigurations yes ``` @@ -363,14 +363,14 @@ Example failure: Ensure the Linkerd ValidatingWebhookConfiguration exists: ```bash -$ kubectl get validatingwebhookconfigurations | grep linkerd +kubectl get validatingwebhookconfigurations | grep linkerd linkerd-sp-validator-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create ValidatingWebhookConfigurations: ```bash -$ kubectl auth can-i create validatingwebhookconfigurations +kubectl auth can-i create validatingwebhookconfigurations yes ``` @@ -418,7 +418,7 @@ Example failure: Ensure the Linkerd ConfigMap exists: ```bash -$ kubectl -n linkerd get configmap/linkerd-config +kubectl -n linkerd get configmap/linkerd-config NAME DATA AGE linkerd-config 3 61m ``` @@ -426,7 +426,7 @@ linkerd-config 3 61m Also ensure you have permission to create ConfigMaps: ```bash -$ kubectl -n linkerd auth can-i create configmap +kubectl -n linkerd auth can-i create configmap yes ``` @@ -780,7 +780,7 @@ Example failure: Verify the state of the control plane pods with: ```bash -$ kubectl -n linkerd get po +kubectl -n linkerd get po NAME READY STATUS RESTARTS AGE linkerd-destination-5fd7b5d466-szgqm 2/2 Running 1 12m linkerd-identity-54df78c479-hbh5m 2/2 Running 0 12m @@ -862,7 +862,7 @@ Ensure you can connect to the Linkerd version check endpoint from the environment the `linkerd` cli is running: ```bash -$ curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" +curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" {"stable":"stable-2.1.0","edge":"edge-19.1.2"} ``` @@ -961,7 +961,7 @@ normally. Example failure: ```bash -$ linkerd check --proxy --namespace foo +linkerd check --proxy --namespace foo ... 
× data plane namespace exists The "foo" namespace does not exist @@ -1147,7 +1147,7 @@ Example error: Ensure that the linkerd-cni-config ConfigMap exists in the CNI namespace: ```bash -$ kubectl get cm linkerd-cni-config -n linkerd-cni +kubectl get cm linkerd-cni-config -n linkerd-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1155,7 +1155,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create ConfigMaps: ```bash -$ kubectl auth can-i create ConfigMaps +kubectl auth can-i create ConfigMaps yes ``` @@ -1172,7 +1172,7 @@ Example error: Ensure that the cluster role exists: ```bash -$ kubectl get clusterrole linkerd-cni +kubectl get clusterrole linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1180,7 +1180,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create ClusterRoles +kubectl auth can-i create ClusterRoles yes ``` @@ -1197,7 +1197,7 @@ Example error: Ensure that the cluster role binding exists: ```bash -$ kubectl get clusterrolebinding linkerd-cni +kubectl get clusterrolebinding linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1205,7 +1205,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create ClusterRoleBindings +kubectl auth can-i create ClusterRoleBindings yes ``` @@ -1222,7 +1222,7 @@ Example error: Ensure that the CNI service account exists in the CNI namespace: ```bash -$ kubectl get ServiceAccount linkerd-cni -n linkerd-cni +kubectl get ServiceAccount linkerd-cni -n linkerd-cni NAME SECRETS AGE linkerd-cni 1 45m ``` @@ -1230,7 +1230,7 @@ linkerd-cni 1 45m Also ensure you have permission to create ServiceAccount: ```bash -$ kubectl auth can-i create ServiceAccounts -n linkerd-cni +kubectl auth can-i create ServiceAccounts -n linkerd-cni yes ``` @@ -1247,7 +1247,7 @@ 
Example error: Ensure that the CNI daemonset exists in the CNI namespace: ```bash -$ kubectl get ds -n linkerd-cni +kubectl get ds -n linkerd-cni NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE linkerd-cni 1 1 1 1 1 beta.kubernetes.io/os=linux 14m ``` @@ -1255,7 +1255,7 @@ linkerd-cni 1 1 1 1 1 beta.kubernet Also ensure you have permission to create DaemonSets: ```bash -$ kubectl auth can-i create DaemonSets -n linkerd-cni +kubectl auth can-i create DaemonSets -n linkerd-cni yes ``` @@ -1272,7 +1272,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -$ kubectl get po -n linkerd-cn +kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s @@ -1282,7 +1282,7 @@ linkerd-cni-p5670 1/1 Running 0 9m25s Ensure that all pods have finished the deployment of the CNI config and binary: ```bash -$ kubectl logs linkerd-cni-rzp2q -n linkerd-cni +kubectl logs linkerd-cni-rzp2q -n linkerd-cni Wrote linkerd CNI binaries to /host/opt/cni/bin Created CNI config /host/etc/cni/net.d/10-kindnet.conflist Done configuring CNI. Sleep=true @@ -1310,7 +1310,7 @@ Make sure multicluster extension is correctly installed and that the `links.multicluster.linkerd.io` CRD is present. ```bash -$ kubectl get crds | grep multicluster +kubectl get crds | grep multicluster NAME CREATED AT links.multicluster.linkerd.io 2021-03-10T09:58:10Z ``` @@ -1400,7 +1400,7 @@ the rules section. 
Expected rules for `linkerd-service-mirror-access-local-resources` cluster role: ```bash -$ kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml +kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml kind: ClusterRole metadata: labels: @@ -1433,7 +1433,7 @@ rules: Expected rules for `linkerd-service-mirror-read-remote-creds` role: ```bash -$ kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml +kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml kind: Role metadata: labels: @@ -1466,7 +1466,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the controller pod with: ```bash -$ kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror +kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror NAME READY STATUS RESTARTS AGE linkerd-service-mirror-7bb8ff5967-zg265 2/2 Running 0 50m ``` @@ -1612,7 +1612,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoles exist: ```bash -$ kubectl get clusterroles | grep linkerd-viz +kubectl get clusterroles | grep linkerd-viz linkerd-linkerd-viz-metrics-api 2021-01-26T18:02:17Z linkerd-linkerd-viz-prometheus 2021-01-26T18:02:17Z linkerd-linkerd-viz-tap 2021-01-26T18:02:17Z @@ -1623,7 +1623,7 @@ linkerd-linkerd-viz-web-check 2021-01-2 Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +kubectl auth can-i create clusterroles yes ``` @@ -1640,7 +1640,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoleBindings exist: ```bash -$ kubectl get clusterrolebindings | grep linkerd-viz +kubectl get clusterrolebindings | grep linkerd-viz linkerd-linkerd-viz-metrics-api ClusterRole/linkerd-linkerd-viz-metrics-api 18h linkerd-linkerd-viz-prometheus 
ClusterRole/linkerd-linkerd-viz-prometheus 18h linkerd-linkerd-viz-tap ClusterRole/linkerd-linkerd-viz-tap 18h @@ -1652,7 +1652,7 @@ linkerd-linkerd-viz-web-check ClusterRole/linkerd-linke Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create clusterrolebindings +kubectl auth can-i create clusterrolebindings yes ``` @@ -1741,7 +1741,7 @@ requirements in the cluster: Ensure all the linkerd-viz pods are injected ```bash -$ kubectl -n linkerd-viz get pods +kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1765,7 +1765,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-viz pods are running with 2/2 ```bash -$ kubectl -n linkerd-viz get pods +kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1936,7 +1936,7 @@ Ensure you can connect to the Linkerd Buoyant version check endpoint from the environment the `linkerd` cli is running: ```bash -$ curl https://buoyant.cloud/version.json +curl https://buoyant.cloud/version.json {"linkerd-buoyant":"v0.4.4"} ``` @@ -2001,7 +2001,7 @@ linkerd-buoyant install | kubectl apply -f - Ensure that the cluster role exists: ```bash -$ kubectl get clusterrole buoyant-cloud-agent +kubectl get clusterrole buoyant-cloud-agent NAME CREATED AT buoyant-cloud-agent 2020-11-13T00:59:50Z ``` @@ -2009,7 +2009,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create ClusterRoles +kubectl auth can-i create ClusterRoles yes ``` @@ -2024,7 +2024,7 @@ yes Ensure that the cluster role binding exists: ```bash -$ kubectl get clusterrolebinding buoyant-cloud-agent +kubectl get clusterrolebinding buoyant-cloud-agent NAME ROLE AGE buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 
301d ``` @@ -2032,7 +2032,7 @@ buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d Also ensure you have permission to create ClusterRoleBindings: ```bash -$ kubectl auth can-i create ClusterRoleBindings +kubectl auth can-i create ClusterRoleBindings yes ``` @@ -2047,7 +2047,7 @@ yes Ensure that the service account exists: ```bash -$ kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent +kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent NAME SECRETS AGE buoyant-cloud-agent 1 301d ``` @@ -2055,7 +2055,7 @@ buoyant-cloud-agent 1 301d Also ensure you have permission to create ServiceAccounts: ```bash -$ kubectl -n buoyant-cloud auth can-i create ServiceAccount +kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2070,7 +2070,7 @@ yes Ensure that the secret exists: ```bash -$ kubectl -n buoyant-cloud get secret buoyant-cloud-id +kubectl -n buoyant-cloud get secret buoyant-cloud-id NAME TYPE DATA AGE buoyant-cloud-id Opaque 4 301d ``` @@ -2078,7 +2078,7 @@ buoyant-cloud-id Opaque 4 301d Also ensure you have permission to create ServiceAccounts: ```bash -$ kubectl -n buoyant-cloud auth can-i create ServiceAccount +kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2116,7 +2116,7 @@ everything to start up. 
If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-agent` Deployment with: ```bash -$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent +kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 156m ``` @@ -2139,7 +2139,7 @@ Ensure the `buoyant-cloud-agent` pod is injected, the `READY` column should show `2/2`: ```bash -$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent +kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 161m ``` @@ -2158,7 +2158,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -$ linkerd-buoyant version +linkerd-buoyant version CLI version: v0.4.4 Agent version: v0.4.4 ``` @@ -2217,7 +2217,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-metrics` DaemonSet with: ```bash -$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics +kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 163m buoyant-cloud-metrics-q8jhj 2/2 Running 0 163m @@ -2243,7 +2243,7 @@ Ensure the `buoyant-cloud-metrics` pods are injected, the `READY` column should show `2/2`: ```bash -$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics +kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 166m buoyant-cloud-metrics-q8jhj 2/2 Running 0 166m @@ -2265,7 +2265,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' +kubectl -n buoyant-cloud get 
daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' {"app.kubernetes.io/name":"metrics","app.kubernetes.io/part-of":"buoyant-cloud","app.kubernetes.io/version":"v0.4.4"} ``` diff --git a/linkerd.io/content/blog/2016/1210-slow-cooker-load-testing-for-tough-software/index.md b/linkerd.io/content/blog/2016/1210-slow-cooker-load-testing-for-tough-software/index.md index e75e31a998..59909fb59f 100644 --- a/linkerd.io/content/blog/2016/1210-slow-cooker-load-testing-for-tough-software/index.md +++ b/linkerd.io/content/blog/2016/1210-slow-cooker-load-testing-for-tough-software/index.md @@ -91,7 +91,7 @@ static content. The latencies given are in milliseconds, and we report the min, p50, p95, p99, p999, and max latencies seen during this 10 second interval. ```txt -$ ./slow_cooker_linux_amd64 -url http://target:4140 -qps 50 -concurrency 10 http://perf-target-2:8080 +./slow_cooker_linux_amd64 -url http://target:4140 -qps 50 -concurrency 10 http://perf-target-2:8080 # sending 500 req/s with concurrency=10 to http://perf-target-2:8080 ... # good/b/f t good% min [p50 p95 p99 p999] max change 2016-10-12T20:34:20Z 4990/0/0 5000 99% 10s 0 [ 1 3 4 9 ] 9 @@ -120,7 +120,7 @@ latency. In the example below, we have a backend server suffering from a catastrophic slow down: ```txt -$ ./slow_cooker_linux_amd64 -totalRequests 100000 -qps 5 -concurrency 100 http://perf-target-1:8080 +./slow_cooker_linux_amd64 -totalRequests 100000 -qps 5 -concurrency 100 http://perf-target-1:8080 # sending 500 req/s with concurrency=10 to http://perf-target-2:8080 ... 
# good/b/f t good% min [p50 p95 p99 p999] max change 2016-11-14T20:58:13Z 4900/0/0 5000 98% 10s 0 [ 1 2 6 8 ] 8 + @@ -165,7 +165,7 @@ For comparison, let’s start with a [ApacheBench](http://httpd.apache.org/docs/2.4/programs/ab.html)’s report: ```txt -$ ab -n 100000 -c 10 http://perf-target-1:8080/ +ab -n 100000 -c 10 http://perf-target-1:8080/ This is ApacheBench, Version 2.3 Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/ Licensed to The Apache Software Foundation, http://www.apache.org/ @@ -232,7 +232,7 @@ becomes much more clear that the 99.9th percentile is consistently high; this is not just a few outliers, but a persistent and ongoing problem: ```txt -$ ./slow_cooker_linux_amd64 -totalRequests 20000 -qps 50 -concurrency 10 http://perf-target-2:8080 +./slow_cooker_linux_amd64 -totalRequests 20000 -qps 50 -concurrency 10 http://perf-target-2:8080 # sending 500 req/s with concurrency=10 to http://perf-target-2:8080 ... # good/b/f t good% min [p50 p95 p99 p999] max change 2016-12-07T19:05:37Z 2510/0/0 5000 50% 10s 0 [ 0 0 2 4995 ] 4994 + diff --git a/linkerd.io/content/blog/2018/1208-service-profiles-for-per-route-metrics/index.md b/linkerd.io/content/blog/2018/1208-service-profiles-for-per-route-metrics/index.md index b54e698c64..8936d6c39f 100644 --- a/linkerd.io/content/blog/2018/1208-service-profiles-for-per-route-metrics/index.md +++ b/linkerd.io/content/blog/2018/1208-service-profiles-for-per-route-metrics/index.md @@ -150,7 +150,7 @@ service—but we can't, because we haven't defined any routes for that service yet! ```bash -$ linkerd routes svc/webapp +linkerd routes svc/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 [UNKNOWN] webapp 70.00% 5.7rps 34ms 100ms 269ms ``` @@ -188,13 +188,13 @@ spec: This service describes two routes that the webapp service responds to, `/books` and `/books/`. 
We add the service profile with `kubectl apply`: -`$ kubectl apply -f webapp-profile.yaml` +`kubectl apply -f webapp-profile.yaml` Within about a minute (Prometheus scrapes metrics from the proxies at regular intervals) per-route metrics will be available for the `webapp` service. ```bash -$ linkerd routes svc/webapp +linkerd routes svc/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 /books/{id} webapp 100.00% 0.3rps 26ms 75ms 95ms /books webapp 56.25% 0.5rps 25ms 320ms 384ms diff --git a/linkerd.io/content/blog/2019/0222-how-we-designed-retries-in-linkerd-2-2/index.md b/linkerd.io/content/blog/2019/0222-how-we-designed-retries-in-linkerd-2-2/index.md index 3e672ef738..01373a9ecc 100644 --- a/linkerd.io/content/blog/2019/0222-how-we-designed-retries-in-linkerd-2-2/index.md +++ b/linkerd.io/content/blog/2019/0222-how-we-designed-retries-in-linkerd-2-2/index.md @@ -164,7 +164,7 @@ One thing that we can notice about this application is that the success rate of requests from the books service to the authors service is very poor: ```bash -$ linkerd routes deploy/books --to svc/authors +linkerd routes deploy/books --to svc/authors ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 [DEFAULT] authors 54.24% 3.9rps 5ms 14ms 19ms ``` @@ -173,8 +173,8 @@ To get a better picture of what’s going on here, let’s add a service profile the authors service, generated from a Swagger definition: ```bash -$ curl --proto '=https' --tlsv1.2 -sSfL https://run.linkerd.io/booksapp/authors.swagger | linkerd profile --open-api - authors | kubectl apply -f - -$ linkerd routes deploy/books --to svc/authors +curl --proto '=https' --tlsv1.2 -sSfL https://run.linkerd.io/booksapp/authors.swagger | linkerd profile --open-api - authors | kubectl apply -f - +linkerd routes deploy/books --to svc/authors ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 DELETE /authors/{id}.json authors 0.00% 0.0rps 0ms 0ms 0ms GET /authors.json authors 0.00% 0.0rps 0ms 0ms 
0ms @@ -190,7 +190,7 @@ time. To correct this, let’s edit the authors service profile and make those requests retryable: ```bash -$ kubectl edit sp/authors.default.svc.cluster.local +kubectl edit sp/authors.default.svc.cluster.local [...] - condition: method: HEAD @@ -203,7 +203,7 @@ After editing the service profile, we see a nearly immediate improvement in success rate: ```bash -$ linkerd routes deploy/books --to svc/authors -o wide +linkerd routes deploy/books --to svc/authors -o wide ROUTE SERVICE EFFECTIVE_SUCCESS EFFECTIVE_RPS ACTUAL_SUCCESS ACTUAL_RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 DELETE /authors/{id}.json authors 0.00% 0.0rps 0.00% 0.0rps 0ms 0ms 0ms GET /authors.json authors 0.00% 0.0rps 0.00% 0.0rps 0ms 0ms 0ms @@ -221,7 +221,7 @@ the purposes of this demo, I’ll set a timeout of 25ms. Your results will vary depending on the characteristics of your system. ```bash -$ kubectl edit sp/authors.default.svc.cluster.local +kubectl edit sp/authors.default.svc.cluster.local [...] - condition: method: HEAD @@ -235,7 +235,7 @@ We now see that success rate has come down slightly because some requests are timing out, but that the tail latency has been greatly reduced: ```bash -$ linkerd routes deploy/books --to svc/authors -o wide +linkerd routes deploy/books --to svc/authors -o wide ROUTE SERVICE EFFECTIVE_SUCCESS EFFECTIVE_RPS ACTUAL_SUCCESS ACTUAL_RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 DELETE /authors/{id}.json authors 0.00% 0.0rps 0.00% 0.0rps 0ms 0ms 0ms GET /authors.json authors 0.00% 0.0rps 0.00% 0.0rps 0ms 0ms 0ms diff --git a/linkerd.io/content/blog/2019/1007-linkerd-distributed-tracing/index.md b/linkerd.io/content/blog/2019/1007-linkerd-distributed-tracing/index.md index 790268900d..261bb1dbd5 100644 --- a/linkerd.io/content/blog/2019/1007-linkerd-distributed-tracing/index.md +++ b/linkerd.io/content/blog/2019/1007-linkerd-distributed-tracing/index.md @@ -82,7 +82,7 @@ on your cluster. If you don't, you can follow the instructions. 
```bash -$ linkerd version +linkerd version Client version: stable-2.6 Server version: stable-2.6 ``` diff --git a/linkerd.io/content/blog/2024/1015-edge-release-roundup/index.md b/linkerd.io/content/blog/2024/1015-edge-release-roundup/index.md index 8f61008f3b..66c8b2094c 100644 --- a/linkerd.io/content/blog/2024/1015-edge-release-roundup/index.md +++ b/linkerd.io/content/blog/2024/1015-edge-release-roundup/index.md @@ -120,7 +120,7 @@ command line to the new metrics available based on Gateway API routes, for example: ```bash {class=disable-copy} -$ linkerd viz stat-outbound -n faces deploy/face +linkerd viz stat-outbound -n faces deploy/face NAME SERVICE ROUTE TYPE BACKEND SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 TIMEOUTS RETRIES face smiley:80 smiley-route HTTPRoute 78.36% 6.32 41ms 5886ms 9177ms 0.00% 0.00% ├─────────────────────► smiley:80 79.34% 5.57 20ms 5725ms 9145ms 0.00% diff --git a/linkerd.io/content/blog/2025/0725-tilt-linkerd-nginx-part-2/index.md b/linkerd.io/content/blog/2025/0725-tilt-linkerd-nginx-part-2/index.md index dc79544aa8..44f4e39053 100644 --- a/linkerd.io/content/blog/2025/0725-tilt-linkerd-nginx-part-2/index.md +++ b/linkerd.io/content/blog/2025/0725-tilt-linkerd-nginx-part-2/index.md @@ -199,7 +199,7 @@ While the dashboard provides intuitive visualizations, the Linkerd CLI offers the same data in a terminal-friendly format for quick diagnostics: ```bash -$ linkerd viz top deployment/baz +linkerd viz top deployment/baz Source Destination Method Path Count Best Worst Last Success Rate foo-64798767b7-x8xvf baz-659dbf6895-v7gdm POST /demo.Baz/GetInfo 1187 81µs 9ms 124µs 100.00% bar-577c4bf849-cpdxl baz-659dbf6895-9twg9 POST /demo.Baz/GetInfo 1103 86µs 6ms 140µs 100.00% From 36f176acc12868d15a60c63b94780ebfa06e6deb Mon Sep 17 00:00:00 2001 From: Beza Date: Sun, 21 Dec 2025 19:48:36 -0300 Subject: [PATCH 02/31] fix namespace name of linkerd-cni Signed-off-by: Beza --- linkerd.io/content/2-edge/tasks/troubleshooting.md | 2 +- 
linkerd.io/content/2.10/tasks/troubleshooting.md | 2 +- linkerd.io/content/2.11/tasks/troubleshooting.md | 2 +- linkerd.io/content/2.12/tasks/troubleshooting.md | 2 +- linkerd.io/content/2.13/tasks/troubleshooting.md | 2 +- linkerd.io/content/2.14/tasks/troubleshooting.md | 2 +- linkerd.io/content/2.15/tasks/troubleshooting.md | 2 +- linkerd.io/content/2.16/tasks/troubleshooting.md | 2 +- linkerd.io/content/2.17/tasks/troubleshooting.md | 2 +- linkerd.io/content/2.18/tasks/troubleshooting.md | 2 +- linkerd.io/content/2.19/tasks/troubleshooting.md | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/linkerd.io/content/2-edge/tasks/troubleshooting.md b/linkerd.io/content/2-edge/tasks/troubleshooting.md index 5586441c26..c65e8fb63c 100644 --- a/linkerd.io/content/2-edge/tasks/troubleshooting.md +++ b/linkerd.io/content/2-edge/tasks/troubleshooting.md @@ -1272,7 +1272,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -kubectl get po -n linkerd-cn +kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s diff --git a/linkerd.io/content/2.10/tasks/troubleshooting.md b/linkerd.io/content/2.10/tasks/troubleshooting.md index 19d5ec5ccb..d30f477a4d 100644 --- a/linkerd.io/content/2.10/tasks/troubleshooting.md +++ b/linkerd.io/content/2.10/tasks/troubleshooting.md @@ -1324,7 +1324,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -kubectl get po -n linkerd-cn +kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s diff --git a/linkerd.io/content/2.11/tasks/troubleshooting.md b/linkerd.io/content/2.11/tasks/troubleshooting.md index 78aa233661..10edc026ad 100644 --- a/linkerd.io/content/2.11/tasks/troubleshooting.md +++ b/linkerd.io/content/2.11/tasks/troubleshooting.md @@ -1471,7 +1471,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash 
-kubectl get po -n linkerd-cn +kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s diff --git a/linkerd.io/content/2.12/tasks/troubleshooting.md b/linkerd.io/content/2.12/tasks/troubleshooting.md index 8e7bf49e3c..b142ee66a9 100644 --- a/linkerd.io/content/2.12/tasks/troubleshooting.md +++ b/linkerd.io/content/2.12/tasks/troubleshooting.md @@ -1243,7 +1243,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -kubectl get po -n linkerd-cn +kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s diff --git a/linkerd.io/content/2.13/tasks/troubleshooting.md b/linkerd.io/content/2.13/tasks/troubleshooting.md index 8e7bf49e3c..b142ee66a9 100644 --- a/linkerd.io/content/2.13/tasks/troubleshooting.md +++ b/linkerd.io/content/2.13/tasks/troubleshooting.md @@ -1243,7 +1243,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -kubectl get po -n linkerd-cn +kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s diff --git a/linkerd.io/content/2.14/tasks/troubleshooting.md b/linkerd.io/content/2.14/tasks/troubleshooting.md index 8e7bf49e3c..b142ee66a9 100644 --- a/linkerd.io/content/2.14/tasks/troubleshooting.md +++ b/linkerd.io/content/2.14/tasks/troubleshooting.md @@ -1243,7 +1243,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -kubectl get po -n linkerd-cn +kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s diff --git a/linkerd.io/content/2.15/tasks/troubleshooting.md b/linkerd.io/content/2.15/tasks/troubleshooting.md index e8fd89470b..2c57453aa6 100644 --- a/linkerd.io/content/2.15/tasks/troubleshooting.md +++ b/linkerd.io/content/2.15/tasks/troubleshooting.md @@ -1258,7 +1258,7 @@ 
Example failure: Ensure that all the CNI pods are running: ```bash -kubectl get po -n linkerd-cn +kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s diff --git a/linkerd.io/content/2.16/tasks/troubleshooting.md b/linkerd.io/content/2.16/tasks/troubleshooting.md index e8fd89470b..2c57453aa6 100644 --- a/linkerd.io/content/2.16/tasks/troubleshooting.md +++ b/linkerd.io/content/2.16/tasks/troubleshooting.md @@ -1258,7 +1258,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -kubectl get po -n linkerd-cn +kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s diff --git a/linkerd.io/content/2.17/tasks/troubleshooting.md b/linkerd.io/content/2.17/tasks/troubleshooting.md index cf72a5b982..79bacd3f7b 100644 --- a/linkerd.io/content/2.17/tasks/troubleshooting.md +++ b/linkerd.io/content/2.17/tasks/troubleshooting.md @@ -1272,7 +1272,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -kubectl get po -n linkerd-cn +kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s diff --git a/linkerd.io/content/2.18/tasks/troubleshooting.md b/linkerd.io/content/2.18/tasks/troubleshooting.md index 396b0dcda9..1fdeb9710b 100644 --- a/linkerd.io/content/2.18/tasks/troubleshooting.md +++ b/linkerd.io/content/2.18/tasks/troubleshooting.md @@ -1272,7 +1272,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -kubectl get po -n linkerd-cn +kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s diff --git a/linkerd.io/content/2.19/tasks/troubleshooting.md b/linkerd.io/content/2.19/tasks/troubleshooting.md index 5586441c26..c65e8fb63c 100644 --- a/linkerd.io/content/2.19/tasks/troubleshooting.md +++ 
b/linkerd.io/content/2.19/tasks/troubleshooting.md @@ -1272,7 +1272,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -kubectl get po -n linkerd-cn +kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s From 37bdbd4049ecc1c8f55bcde066238f09693d26aa Mon Sep 17 00:00:00 2001 From: Beza Date: Sun, 21 Dec 2025 19:49:23 -0300 Subject: [PATCH 03/31] remove ~ on comment block Signed-off-by: Beza --- linkerd.io/content/2.17/tasks/managing-egress-traffic.md | 2 +- linkerd.io/content/2.18/tasks/managing-egress-traffic.md | 2 +- linkerd.io/content/2.19/tasks/managing-egress-traffic.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/linkerd.io/content/2.17/tasks/managing-egress-traffic.md b/linkerd.io/content/2.17/tasks/managing-egress-traffic.md index cbcd48c416..d579ba4c56 100644 --- a/linkerd.io/content/2.17/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2.17/tasks/managing-egress-traffic.md @@ -190,7 +190,7 @@ Interestingly enough though, if we go back to our client shell and we try to initiate HTTPS traffic to the same service, it will not be allowed: ```bash -~ curl -v https://httpbin.org/get +curl -v https://httpbin.org/get curl: (35) TLS connect error: error:00000000:lib(0)::reason(0) ``` diff --git a/linkerd.io/content/2.18/tasks/managing-egress-traffic.md b/linkerd.io/content/2.18/tasks/managing-egress-traffic.md index 518c4f72f7..a4a7155edd 100644 --- a/linkerd.io/content/2.18/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2.18/tasks/managing-egress-traffic.md @@ -235,7 +235,7 @@ Interestingly enough though, if we go back to our client shell and we try to initiate HTTPS traffic to the same service, it will not be allowed: ```bash -~ curl -v https://httpbin.org/get +curl -v https://httpbin.org/get curl: (35) TLS connect error: error:00000000:lib(0)::reason(0) ``` diff --git 
a/linkerd.io/content/2.19/tasks/managing-egress-traffic.md b/linkerd.io/content/2.19/tasks/managing-egress-traffic.md index 518c4f72f7..a4a7155edd 100644 --- a/linkerd.io/content/2.19/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2.19/tasks/managing-egress-traffic.md @@ -235,7 +235,7 @@ Interestingly enough though, if we go back to our client shell and we try to initiate HTTPS traffic to the same service, it will not be allowed: ```bash -~ curl -v https://httpbin.org/get +curl -v https://httpbin.org/get curl: (35) TLS connect error: error:00000000:lib(0)::reason(0) ``` From 38f6d7a0cb6115c176b73fc7625b75b38b3b4fba Mon Sep 17 00:00:00 2001 From: beza Date: Thu, 29 Jan 2026 08:42:19 -0300 Subject: [PATCH 04/31] Update linkerd.io/content/2-edge/reference/iptables.md Co-authored-by: Flynn --- linkerd.io/content/2-edge/reference/iptables.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linkerd.io/content/2-edge/reference/iptables.md b/linkerd.io/content/2-edge/reference/iptables.md index 9b4d229a59..44f16fb25a 100644 --- a/linkerd.io/content/2-edge/reference/iptables.md +++ b/linkerd.io/content/2-edge/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -kubectl -n logs linkerd-init +kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` From 679e4bde07e587247b2574817a34e2811f38633c Mon Sep 17 00:00:00 2001 From: beza Date: Thu, 29 Jan 2026 08:42:26 -0300 Subject: [PATCH 05/31] Update linkerd.io/content/2-edge/tasks/managing-egress-traffic.md Co-authored-by: Flynn --- linkerd.io/content/2-edge/tasks/managing-egress-traffic.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md b/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md index a4a7155edd..d851205248 100644 --- 
a/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md @@ -458,7 +458,7 @@ Now let's verify all works as expected: ```bash # plaintext traffic goes as expected to the /get path -curl http://httpbin.org/get +curl http://httpbin.org/get { "args": {}, "headers": { From c7a1dc805747ebff989c005dd5954ce2de41f7b2 Mon Sep 17 00:00:00 2001 From: beza Date: Thu, 29 Jan 2026 08:42:33 -0300 Subject: [PATCH 06/31] Update linkerd.io/content/2-edge/tasks/multicluster-using-statefulsets.md Co-authored-by: Flynn --- .../content/2-edge/tasks/multicluster-using-statefulsets.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linkerd.io/content/2-edge/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2-edge/tasks/multicluster-using-statefulsets.md index df55b3ee41..645a5619d9 100644 --- a/linkerd.io/content/2-edge/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2-edge/tasks/multicluster-using-statefulsets.md @@ -260,7 +260,7 @@ kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh # however, the service and cluster domain will now be different, since we # are in a different cluster. 
# -/ curl nginx-set-0.nginx-svc-k3d-west.default.svc.east.cluster.local +curl nginx-set-0.nginx-svc-k3d-west.default.svc.east.cluster.local From 0e2ad0b92b8417ef9300e89c1a3151f3e8da0109 Mon Sep 17 00:00:00 2001 From: bezarsnba Date: Sat, 31 Jan 2026 11:13:12 -0300 Subject: [PATCH 07/31] adjust block with multi-command 2.10 to 2.12 Signed-off-by: bezarsnba --- .../content/2-edge/getting-started/_index.md | 4 +- .../content/2-edge/reference/cli/check.md | 2 +- .../tasks/rotating_webhooks_certificates.md | 20 +++++---- .../2.10/tasks/securing-your-cluster.md | 14 +++--- .../2.11/tasks/getting-per-route-metrics.md | 8 ++-- .../2.12/tasks/uninstall-multicluster.md | 1 + .../content/2.15/tasks/distributed-tracing.md | 10 ++--- .../2.15/tasks/getting-per-route-metrics.md | 6 +-- .../tasks/multicluster-using-statefulsets.md | 44 +++++++++---------- linkerd.io/content/2.15/tasks/multicluster.md | 10 ++--- .../content/2.15/tasks/troubleshooting.md | 12 ++--- linkerd.io/content/2.15/tasks/uninstall.md | 6 +-- 12 files changed, 70 insertions(+), 67 deletions(-) diff --git a/linkerd.io/content/2-edge/getting-started/_index.md b/linkerd.io/content/2-edge/getting-started/_index.md index ef3519b59c..cf8c40d543 100644 --- a/linkerd.io/content/2-edge/getting-started/_index.md +++ b/linkerd.io/content/2-edge/getting-started/_index.md @@ -60,8 +60,8 @@ To install the CLI manually, run: ```bash # Setting LINKERD2_VERSION sets the version to install. # If unset, you'll get the latest available edge version. 
-export LINKERD2_VERSION={{< edge-version >}} -curl --proto '=https' --tlsv1.2 -sSfL https://run.linkerd.io/install-edge | sh +$ export LINKERD2_VERSION={{< edge-version >}} +$ curl --proto '=https' --tlsv1.2 -sSfL https://run.linkerd.io/install-edge | sh ``` Be sure to follow the instructions to add it to your path: diff --git a/linkerd.io/content/2-edge/reference/cli/check.md b/linkerd.io/content/2-edge/reference/cli/check.md index 67a2486908..7cd61cd237 100644 --- a/linkerd.io/content/2-edge/reference/cli/check.md +++ b/linkerd.io/content/2-edge/reference/cli/check.md @@ -12,7 +12,7 @@ for a full list of all the possible checks, what they do and how to fix them. ## Example output ```bash -linkerd check +$ linkerd check kubernetes-api -------------- √ can initialize the client diff --git a/linkerd.io/content/2.10/tasks/rotating_webhooks_certificates.md b/linkerd.io/content/2.10/tasks/rotating_webhooks_certificates.md index bfa5b61e2d..9585798983 100644 --- a/linkerd.io/content/2.10/tasks/rotating_webhooks_certificates.md +++ b/linkerd.io/content/2.10/tasks/rotating_webhooks_certificates.md @@ -55,9 +55,9 @@ for idx in "${!SECRETS[@]}"; do \ kubectl -n "${NS[$idx]}" delete secret "${SECRETS[$idx]}"; \ done -linkerd upgrade | kubectl apply -f - -linkerd viz install | kubectl apply -f - -linkerd jaeger install | kubectl apply -f - +$ linkerd upgrade | kubectl apply -f - +$ linkerd viz install | kubectl apply -f - +$ linkerd jaeger install | kubectl apply -f - ``` The above command will recreate the secrets without restarting Linkerd. @@ -74,6 +74,7 @@ they wil be overwritten by a new cert and key generated by the helm chart. 
Confirm that the secrets are recreated with new certificates: + ```bash for idx in "${!SECRETS[@]}"; do \ kubectl -n "${NS[$idx]}" get secret "${SECRETS[$idx]}" -ojsonpath='{.data.crt\.pem}' | \ @@ -85,8 +86,9 @@ done Ensure that Linkerd remains healthy: + ```bash -linkerd check +$ linkerd check ``` Restarting the pods that implement the webhooks and API services is usually not @@ -97,10 +99,10 @@ If you observe certificate expiry errors or mismatched CA certs, restart their pods with: ```sh -kubectl -n linkerd rollout restart deploy \ - linkerd-proxy-injector \ - linkerd-sp-validator \ +$ kubectl -n linkerd rollout restart deploy \ + linkerd-proxy-injector \ + linkerd-sp-validator \ -kubectl -n linkerd-viz rollout restart deploy tap tap-injector -kubectl -n linkerd-jaeger rollout restart deploy jaeger-injector +$ kubectl -n linkerd-viz rollout restart deploy tap tap-injector +$ kubectl -n linkerd-jaeger rollout restart deploy jaeger-injector ``` diff --git a/linkerd.io/content/2.10/tasks/securing-your-cluster.md b/linkerd.io/content/2.10/tasks/securing-your-cluster.md index 6c0efb9462..6d67ae05a7 100644 --- a/linkerd.io/content/2.10/tasks/securing-your-cluster.md +++ b/linkerd.io/content/2.10/tasks/securing-your-cluster.md @@ -54,7 +54,7 @@ kubectl auth can-i watch deployments.tap.linkerd.io -n emojivoto --as $(whoami) You can also use the Linkerd CLI's `--as` flag to confirm: ```bash -linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) Cannot connect to Linkerd Viz: namespaces is forbidden: User "XXXX" cannot list resource "namespaces" in API group "" at the cluster scope Validate the install with: linkerd viz check ... 
@@ -71,7 +71,7 @@ To enable tap access to all resources in all namespaces, you may bind your user to the `linkerd-linkerd-tap-admin` ClusterRole, installed by default: ```bash -kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin +$ kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin Name: linkerd-linkerd-viz-tap-admin Labels: component=tap linkerd.io/extension=viz @@ -103,7 +103,7 @@ kubectl create clusterrolebinding \ You can verify you now have tap access with: ```bash -linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) req id=3:0 proxy=in src=10.244.0.1:37392 dst=10.244.0.13:9996 tls=not_provided_by_remote :method=GET :authority=10.244.0.13:9996 :path=/ping ... ``` @@ -137,14 +137,14 @@ Because GCloud provides this additional level of access, there are cases where not. To validate this, check whether your GCloud user has Tap access: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces yes ``` And then validate whether your RBAC user has Tap access: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) no - no RBAC policy matched ``` @@ -181,14 +181,14 @@ privileges necessary to tap resources. 
To confirm: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web yes ``` This access is enabled via a `linkerd-linkerd-viz-web-admin` ClusterRoleBinding: ```bash -kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin +$ kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin Name: linkerd-linkerd-viz-web-admin Labels: component=web linkerd.io/extensions=viz diff --git a/linkerd.io/content/2.11/tasks/getting-per-route-metrics.md b/linkerd.io/content/2.11/tasks/getting-per-route-metrics.md index 7d6120773c..e07c7de196 100644 --- a/linkerd.io/content/2.11/tasks/getting-per-route-metrics.md +++ b/linkerd.io/content/2.11/tasks/getting-per-route-metrics.md @@ -14,7 +14,7 @@ For a tutorial that shows this functionality off, check out the You can view per-route metrics in the CLI by running `linkerd viz routes`: ```bash -linkerd viz routes svc/webapp +$ linkerd viz routes svc/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 GET / webapp 100.00% 0.6rps 25ms 30ms 30ms GET /authors/{id} webapp 100.00% 0.6rps 22ms 29ms 30ms @@ -34,7 +34,7 @@ specified in your service profile will end up there. 
It is also possible to look the metrics up by other resource types, such as: ```bash -linkerd viz routes deploy/webapp +$ linkerd viz routes deploy/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 [DEFAULT] kubernetes 0.00% 0.0rps 0ms 0ms 0ms GET / webapp 100.00% 0.5rps 27ms 38ms 40ms @@ -53,7 +53,7 @@ Then, it is possible to filter all the way down to requests going from a specific resource to other services: ```bash -linkerd viz routes deploy/webapp --to svc/books +$ linkerd viz routes deploy/webapp --to svc/books ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 DELETE /books/{id}.json books 100.00% 0.5rps 18ms 29ms 30ms GET /books.json books 100.00% 1.1rps 7ms 12ms 18ms @@ -76,7 +76,7 @@ linkerd viz tap deploy/webapp -o wide | grep req A sample output is: ```bash -req id=3:1 proxy=in src=10.4.0.14:58562 dst=10.4.1.4:7000 tls=disabled :method=POST :authority=webapp:7000 :path=/books/24783/edit src_res=deploy/traffic src_ns=default dst_res=deploy/webapp dst_ns=default rt_route=POST /books/{id}/edit +req id=3:1 proxy=in src=10.4.0.14:58562 dst=10.4.1.4:7000 tls=disabled :method=POST :authority=webapp:7000 :path=/books/24783/edit src_res=deploy/traffic src_ns=default dst_res=deploy/webapp dst_ns=default rt_route=POST /books/{id}/edit ``` This will select only the requests observed and show the `:authority` and diff --git a/linkerd.io/content/2.12/tasks/uninstall-multicluster.md b/linkerd.io/content/2.12/tasks/uninstall-multicluster.md index 0b850fe583..71b3761399 100644 --- a/linkerd.io/content/2.12/tasks/uninstall-multicluster.md +++ b/linkerd.io/content/2.12/tasks/uninstall-multicluster.md @@ -24,6 +24,7 @@ up correctly and are not left orphaned.
To unlink, run the `linkerd multicluster unlink` command and pipe the output to `kubectl delete`: + ```bash linkerd multicluster unlink --cluster-name=target | kubectl delete -f - ``` diff --git a/linkerd.io/content/2.15/tasks/distributed-tracing.md b/linkerd.io/content/2.15/tasks/distributed-tracing.md index 9527f5d825..467947fd5b 100644 --- a/linkerd.io/content/2.15/tasks/distributed-tracing.md +++ b/linkerd.io/content/2.15/tasks/distributed-tracing.md @@ -98,7 +98,7 @@ With `vote-bot` starting traces for every request, spans should now be showing up in Jaeger. To get to the UI, run: ```bash -linkerd jaeger dashboard +$ linkerd jaeger dashboard ``` ![Jaeger](/docs/images/tracing/jaeger-empty.png "Jaeger") @@ -144,8 +144,8 @@ To cleanup, uninstall the Linkerd-Jaeger extension along with emojivoto by running: ```bash -linkerd jaeger uninstall | kubectl delete -f - -kubectl delete ns emojivoto +$ linkerd jaeger uninstall | kubectl delete -f - +$ kubectl delete ns emojivoto ``` ## Bring your own Jaeger @@ -158,7 +158,7 @@ Create the following YAML file which disables the built in Jaeger instance and specifies the OpenCensus collector's config. ```bash -cat < jaeger-linkerd.yaml +$ cat < jaeger-linkerd.yaml jaeger: enabled: false collector: @@ -193,7 +193,7 @@ collector: processors: [batch] exporters: [jaeger] EOF -linkerd jaeger install --values ./jaeger-linkerd.yaml | kubectl apply -f - +$ linkerd jaeger install --values ./jaeger-linkerd.yaml | kubectl apply -f - ``` You'll want to ensure that the `exporters.jaeger.endpoint` which is diff --git a/linkerd.io/content/2.15/tasks/getting-per-route-metrics.md b/linkerd.io/content/2.15/tasks/getting-per-route-metrics.md index c2db8c0965..34ee2bff6a 100644 --- a/linkerd.io/content/2.15/tasks/getting-per-route-metrics.md +++ b/linkerd.io/content/2.15/tasks/getting-per-route-metrics.md @@ -24,7 +24,7 @@ per-route authorization. 
You can view per-route metrics in the CLI by running `linkerd viz routes`: ```bash -linkerd viz routes svc/webapp +$ linkerd viz routes svc/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 GET / webapp 100.00% 0.6rps 25ms 30ms 30ms GET /authors/{id} webapp 100.00% 0.6rps 22ms 29ms 30ms @@ -44,7 +44,7 @@ specified in your service profile will end up there. It is also possible to look the metrics up by other resource types, such as: ```bash -linkerd viz routes deploy/webapp +$ linkerd viz routes deploy/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 [DEFAULT] kubernetes 0.00% 0.0rps 0ms 0ms 0ms GET / webapp 100.00% 0.5rps 27ms 38ms 40ms @@ -63,7 +63,7 @@ Then, it is possible to filter all the way down to requests going from a specific resource to other services: ```bash -linkerd viz routes deploy/webapp --to svc/books +$ linkerd viz routes deploy/webapp --to svc/books ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 DELETE /books/{id}.json books 100.00% 0.5rps 18ms 29ms 30ms GET /books.json books 100.00% 1.1rps 7ms 12ms 18ms diff --git a/linkerd.io/content/2.15/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.15/tasks/multicluster-using-statefulsets.md index c720c09563..0bdc7f7d24 100644 --- a/linkerd.io/content/2.15/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.15/tasks/multicluster-using-statefulsets.md @@ -48,8 +48,8 @@ The first step is to clone the demo repository on your local machine. ```sh # clone example repository -git clone git@github.com:mateiidavid/l2d-k3d-statefulset.git -cd l2d-k3d-statefulset +$ git clone git@github.com:mateiidavid/l2d-k3d-statefulset.git +$ cd l2d-k3d-statefulset ``` The second step consists of creating two `k3d` clusters named `east` and `west`, @@ -60,10 +60,10 @@ everything. 
```sh # create k3d clusters -./create.sh +$ ./create.sh # list the clusters -k3d cluster list +$ k3d cluster list NAME SERVERS AGENTS LOADBALANCER east 1/1 0/0 true west 1/1 0/0 true @@ -78,10 +78,10 @@ provided scripts, but feel free to have a look! ```sh # Install Linkerd and multicluster, output to check should be a success -./install.sh +$ ./install.sh # Next, link the two clusters together -./link.sh +$ ./link.sh ``` Perfect! If you've made it this far with no errors, then it's a good sign. In @@ -101,17 +101,17 @@ communication. First, we will deploy our pods and services: ```sh # deploy services and mesh namespaces -./deploy.sh +$ ./deploy.sh # verify both clusters # # verify east -kubectl --context=k3d-east get pods +$ kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 7s # verify west has headless service -kubectl --context=k3d-west get services +$ kubectl --context=k3d-west get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 10m nginx-svc ClusterIP None 80/TCP 8s @@ -119,7 +119,7 @@ nginx-svc ClusterIP None 80/TCP 8s # verify west has statefulset # # this may take a while to come up -kubectl --context=k3d-west get pods +$ kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 53s nginx-set-1 2/2 Running 0 43s @@ -130,7 +130,7 @@ Before we go further, let's have a look at the endpoints object for the `nginx-svc`: ```sh -kubectl --context=k3d-west get endpoints nginx-svc -o yaml +$ kubectl --context=k3d-west get endpoints nginx-svc -o yaml ... subsets: - addresses: @@ -170,15 +170,15 @@ would get an answer back. 
We can test this out by applying the curl pod to the `west` cluster: ```sh -kubectl --context=k3d-west apply -f east/curl.yml -kubectl --context=k3d-west get pods +$ kubectl --context=k3d-west apply -f east/curl.yml +$ kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 5m8s nginx-set-1 2/2 Running 0 4m58s nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s -kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh +$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh /# prompt for curl pod ``` @@ -186,7 +186,7 @@ If we now curl one of these instances, we will get back a response. ```sh # exec'd on the pod -/ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local +/ $ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local " @@ -218,10 +218,10 @@ Now, let's do the same, but this time from the `east` cluster. We will first export the service. ```sh -kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" +$ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled -kubectl --context=k3d-east get services +$ kubectl --context=k3d-east get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 20h nginx-svc-west ClusterIP None 80/TCP 29s @@ -251,17 +251,17 @@ cluster (`west`), will be mirrored as a clusterIP service. We will see in a second why this matters. ```sh -kubectl --context=k3d-east get pods +$ kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh +$ kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh # we want to curl the same hostname we see in the endpoints object above. 
# however, the service and cluster domain will now be different, since we # are in a different cluster. # -/ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local +/ $ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local @@ -329,8 +329,8 @@ validation. To clean-up, you can remove both clusters entirely using the k3d CLI: ```sh -k3d cluster delete east +$ k3d cluster delete east cluster east deleted -k3d cluster delete west +$ k3d cluster delete west cluster west deleted ``` diff --git a/linkerd.io/content/2.15/tasks/multicluster.md b/linkerd.io/content/2.15/tasks/multicluster.md index f1af09cd21..ece25f2732 100644 --- a/linkerd.io/content/2.15/tasks/multicluster.md +++ b/linkerd.io/content/2.15/tasks/multicluster.md @@ -101,13 +101,13 @@ With a valid trust anchor and issuer credentials, we can install Linkerd on your ```bash # first, install the Linkerd CRDs in both clusters -linkerd install --crds \ +$ linkerd install --crds \ | tee \ >(kubectl --context=west apply -f -) \ >(kubectl --context=east apply -f -) # then install the Linkerd control plane in both clusters -linkerd install \ +$ linkerd install \ --identity-trust-anchors-file root.crt \ --identity-issuer-certificate-file issuer.crt \ --identity-issuer-key-file issuer.key \ @@ -120,7 +120,7 @@ And then Linkerd Viz: ```bash for ctx in west east; do - linkerd --context=${ctx} viz install | \ + linkerd --context=${ctx} viz install | \ kubectl --context=${ctx} apply -f - || break done ``` @@ -155,7 +155,7 @@ To install the multicluster components on both `west` and `east`, you can run: ```bash for ctx in west east; do echo "Installing on cluster: ${ctx} ........." - linkerd --context=${ctx} multicluster install | \ + linkerd --context=${ctx} multicluster install | \ kubectl --context=${ctx} apply -f - || break echo "-------------" done @@ -175,7 +175,7 @@ running: ```bash for ctx in west east; do echo "Checking gateway on cluster: ${ctx} ........." 
- kubectl --context=${ctx} -n linkerd-multicluster \ + kubectl --context=${ctx} -n linkerd-multicluster \ rollout status deploy/linkerd-gateway || break echo "-------------" done diff --git a/linkerd.io/content/2.15/tasks/troubleshooting.md b/linkerd.io/content/2.15/tasks/troubleshooting.md index 2c57453aa6..8b1d054813 100644 --- a/linkerd.io/content/2.15/tasks/troubleshooting.md +++ b/linkerd.io/content/2.15/tasks/troubleshooting.md @@ -132,9 +132,9 @@ For more information see these pages in the Kubernetes Documentation: Also verify that these command works: ```bash -kubectl config view -kubectl cluster-info -kubectl version +$ kubectl config view +$ kubectl cluster-info +$ kubectl version ``` Another example failure: @@ -1736,12 +1736,12 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the prometheus related resources are present and running correctly. ```bash -❯ kubectl -n linkerd-viz get deploy,cm | grep prometheus +$ kubectl -n linkerd-viz get deploy,cm | grep prometheus deployment.apps/prometheus 1/1 1 1 3m18s configmap/prometheus-config 1 3m18s -❯ kubectl get clusterRoleBindings | grep prometheus +$ kubectl get clusterRoleBindings | grep prometheus linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 3m37s -❯ kubectl get clusterRoles | grep prometheus +$ kubectl get clusterRoles | grep prometheus linkerd-linkerd-viz-prometheus 2021-02-26T06:03:11Zh ``` diff --git a/linkerd.io/content/2.15/tasks/uninstall.md b/linkerd.io/content/2.15/tasks/uninstall.md index bf10a09044..5c745b28a0 100644 --- a/linkerd.io/content/2.15/tasks/uninstall.md +++ b/linkerd.io/content/2.15/tasks/uninstall.md @@ -21,13 +21,13 @@ To remove any extension, call its `uninstall` subcommand and pipe it to ```bash # To remove Linkerd Viz -linkerd viz uninstall | kubectl delete -f - +$ linkerd viz uninstall | kubectl delete -f - # To remove Linkerd Jaeger -linkerd jaeger uninstall | kubectl delete -f - +$ linkerd jaeger uninstall | kubectl 
delete -f - # To remove Linkerd Multicluster -linkerd multicluster uninstall | kubectl delete -f - +$ linkerd multicluster uninstall | kubectl delete -f - ``` ## Removing the control plane From 9585281502cc95d97f2854d2086363fd8f4ebfce Mon Sep 17 00:00:00 2001 From: bezarsnba Date: Sat, 31 Jan 2026 12:46:11 -0300 Subject: [PATCH 08/31] revision troubleshooting files and others Signed-off-by: bezarsnba --- .../content/2.10/tasks/troubleshooting.md | 158 +++++++++--------- .../content/2.11/tasks/troubleshooting.md | 130 +++++++------- .../content/2.12/tasks/troubleshooting.md | 104 ++++++------ .../2.12/tasks/uninstall-multicluster.md | 1 - .../2.13/tasks/getting-per-route-metrics.md | 8 +- .../tasks/multicluster-using-statefulsets.md | 46 ++--- .../content/2.13/tasks/restricting-access.md | 4 +- .../2.13/tasks/securing-linkerd-tap.md | 16 +- .../content/2.13/tasks/troubleshooting.md | 100 +++++------ .../2.13/tasks/uninstall-multicluster.md | 4 +- .../content/2.14/tasks/troubleshooting.md | 108 ++++++------ .../content/2.15/tasks/troubleshooting.md | 82 ++++----- .../content/2.16/tasks/troubleshooting.md | 102 +++++------ .../content/2.17/tasks/troubleshooting.md | 98 +++++------ .../content/2.18/tasks/troubleshooting.md | 100 +++++------ .../content/2.19/tasks/troubleshooting.md | 98 +++++------ 16 files changed, 579 insertions(+), 580 deletions(-) diff --git a/linkerd.io/content/2.10/tasks/troubleshooting.md b/linkerd.io/content/2.10/tasks/troubleshooting.md index d30f477a4d..7513085c82 100644 --- a/linkerd.io/content/2.10/tasks/troubleshooting.md +++ b/linkerd.io/content/2.10/tasks/troubleshooting.md @@ -26,7 +26,7 @@ installation, that namespace should not exist. 
To check with a different namespace, run: ```bash -linkerd check --pre --linkerd-namespace linkerd-test +$ linkerd check --pre --linkerd-namespace linkerd-test ``` ### √ can create Kubernetes resources {#pre-k8s-cluster-k8s} @@ -266,16 +266,16 @@ and also in the context of a multi-stage setup, for example: ```bash # install cluster-wide resources (first stage) -linkerd install config | kubectl apply -f - +$ linkerd install config | kubectl apply -f - # validate successful cluster-wide resources installation -linkerd check config +$ linkerd check config # install Linkerd control plane -linkerd install control-plane | kubectl apply -f - +$ linkerd install control-plane | kubectl apply -f - # validate successful control-plane installation -linkerd check +$ linkerd check ``` ### √ control plane Namespace exists {#l5d-existence-ns} @@ -314,7 +314,7 @@ Example failure: Ensure the Linkerd ClusterRoles exist: ```bash -kubectl get clusterroles | grep linkerd +$ kubectl get clusterroles | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -323,7 +323,7 @@ linkerd-linkerd-proxy-injector 9d Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create clusterroles +$ kubectl auth can-i create clusterroles yes ``` @@ -340,7 +340,7 @@ Example failure: Ensure the Linkerd ClusterRoleBindings exist: ```bash -kubectl get clusterrolebindings | grep linkerd +$ kubectl get clusterrolebindings | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -349,7 +349,7 @@ linkerd-linkerd-proxy-injector 9d Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create clusterrolebindings +$ kubectl auth can-i create clusterrolebindings yes ``` @@ -366,7 +366,7 @@ Example failure: Ensure the Linkerd ServiceAccounts exist: ```bash -kubectl -n linkerd get serviceaccounts +$ kubectl -n linkerd get serviceaccounts NAME 
SECRETS AGE default 1 14m linkerd-destination 1 14m @@ -379,7 +379,7 @@ Also ensure you have permission to create ServiceAccounts in the Linkerd namespace: ```bash -kubectl -n linkerd auth can-i create serviceaccounts +$ kubectl -n linkerd auth can-i create serviceaccounts yes ``` @@ -396,7 +396,7 @@ Example failure: Ensure the Linkerd CRD exists: ```bash -kubectl get customresourcedefinitions +$ kubectl get customresourcedefinitions NAME CREATED AT serviceprofiles.linkerd.io 2019-04-25T21:47:31Z ``` @@ -404,7 +404,7 @@ serviceprofiles.linkerd.io 2019-04-25T21:47:31Z Also ensure you have permission to create CRDs: ```bash -kubectl auth can-i create customresourcedefinitions +$ kubectl auth can-i create customresourcedefinitions yes ``` @@ -421,14 +421,14 @@ Example failure: Ensure the Linkerd MutatingWebhookConfigurations exists: ```bash -kubectl get mutatingwebhookconfigurations | grep linkerd +$ kubectl get mutatingwebhookconfigurations | grep linkerd linkerd-proxy-injector-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create MutatingWebhookConfigurations: ```bash -kubectl auth can-i create mutatingwebhookconfigurations +$ kubectl auth can-i create mutatingwebhookconfigurations yes ``` @@ -445,14 +445,14 @@ Example failure: Ensure the Linkerd ValidatingWebhookConfiguration exists: ```bash -kubectl get validatingwebhookconfigurations | grep linkerd +$ kubectl get validatingwebhookconfigurations | grep linkerd linkerd-sp-validator-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create ValidatingWebhookConfigurations: ```bash -kubectl auth can-i create validatingwebhookconfigurations +$ kubectl auth can-i create validatingwebhookconfigurations yes ``` @@ -469,14 +469,14 @@ Example failure: Ensure the Linkerd PodSecurityPolicy exists: ```bash -kubectl get podsecuritypolicies | grep linkerd +$ kubectl get podsecuritypolicies | grep linkerd linkerd-linkerd-control-plane false NET_ADMIN,NET_RAW RunAsAny RunAsAny 
MustRunAs MustRunAs true configMap,emptyDir,secret,projected,downwardAPI,persistentVolumeClaim ``` Also ensure you have permission to create PodSecurityPolicies: ```bash -kubectl auth can-i create podsecuritypolicies +$ kubectl auth can-i create podsecuritypolicies yes ``` @@ -495,7 +495,7 @@ Example failure: Ensure the Linkerd ConfigMap exists: ```bash -kubectl -n linkerd get configmap/linkerd-config +$ kubectl -n linkerd get configmap/linkerd-config NAME DATA AGE linkerd-config 3 61m ``` @@ -503,7 +503,7 @@ linkerd-config 3 61m Also ensure you have permission to create ConfigMaps: ```bash -kubectl -n linkerd auth can-i create configmap +$ kubectl -n linkerd auth can-i create configmap yes ``` @@ -820,7 +820,7 @@ Example failure: Verify the state of the control plane pods with: ```bash -kubectl -n linkerd get po +$ kubectl -n linkerd get po NAME READY STATUS RESTARTS AGE linkerd-destination-5fd7b5d466-szgqm 2/2 Running 1 12m linkerd-identity-54df78c479-hbh5m 2/2 Running 0 12m @@ -855,11 +855,11 @@ This check indicates a connectivity failure between the cli and the Linkerd control plane. 
To verify connectivity, manually connect to a control plane pod: ```bash -kubectl -n linkerd port-forward \ +$ kubectl -n linkerd port-forward \ $(kubectl -n linkerd get po \ --selector=linkerd.io/control-plane-component=identity \ -o jsonpath='{.items[*].metadata.name}') \ -9995:9995 + 9995:9995 ``` ...and then curl the `/metrics` endpoint: @@ -960,7 +960,7 @@ Ensure Prometheus can connect to each `linkerd-proxy` via the Prometheus dashboard: ```bash -kubectl -n linkerd port-forward svc/linkerd-prometheus 9090 +$ kubectl -n linkerd port-forward svc/linkerd-prometheus 9090 ``` ...and then browse to @@ -1051,7 +1051,7 @@ Ensure the kube-system namespace has the `config.linkerd.io/admission-webhooks:disabled` label: ```bash -kubectl get namespace kube-system -oyaml +$ kubectl get namespace kube-system -oyaml kind: Namespace apiVersion: v1 metadata: @@ -1124,7 +1124,7 @@ Example error: Ensure that the linkerd-cni-config ConfigMap exists in the CNI namespace: ```bash -kubectl get cm linkerd-cni-config -n linkerd-cni +$ kubectl get cm linkerd-cni-config -n linkerd-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1132,7 +1132,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create ConfigMaps: ```bash -kubectl auth can-i create ConfigMaps +$ kubectl auth can-i create ConfigMaps yes ``` @@ -1149,7 +1149,7 @@ Example error: Ensure that the pod security policy exists: ```bash -kubectl get psp linkerd-linkerd-cni-cni +$ kubectl get psp linkerd-linkerd-cni-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1157,7 +1157,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create PodSecurityPolicies: ```bash -kubectl auth can-i create 
PodSecurityPolicies +$ kubectl auth can-i create PodSecurityPolicies yes ``` @@ -1174,7 +1174,7 @@ Example error: Ensure that the cluster role exists: ```bash -kubectl get clusterrole linkerd-cni +$ kubectl get clusterrole linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1182,7 +1182,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create ClusterRoles +$ kubectl auth can-i create ClusterRoles yes ``` @@ -1199,7 +1199,7 @@ Example error: Ensure that the cluster role binding exists: ```bash -kubectl get clusterrolebinding linkerd-cni +$ kubectl get clusterrolebinding linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1207,7 +1207,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create ClusterRoleBindings +$ kubectl auth can-i create ClusterRoleBindings yes ``` @@ -1224,7 +1224,7 @@ Example error: Ensure that the role exists in the CNI namespace: ```bash -kubectl get role linkerd-cni -n linkerd-cni +$ kubectl get role linkerd-cni -n linkerd-cni NAME AGE linkerd-cni 52m ``` @@ -1232,7 +1232,7 @@ linkerd-cni 52m Also ensure you have permission to create Roles: ```bash -kubectl auth can-i create Roles -n linkerd-cni +$ kubectl auth can-i create Roles -n linkerd-cni yes ``` @@ -1249,7 +1249,7 @@ Example error: Ensure that the role binding exists in the CNI namespace: ```bash -kubectl get rolebinding linkerd-cni -n linkerd-cni +$ kubectl get rolebinding linkerd-cni -n linkerd-cni NAME AGE linkerd-cni 49m ``` @@ -1257,7 +1257,7 @@ linkerd-cni 49m Also ensure you have permission to create RoleBindings: ```bash -kubectl auth can-i create RoleBindings -n linkerd-cni +$ kubectl auth can-i create RoleBindings -n linkerd-cni yes ``` @@ -1274,7 +1274,7 @@ Example error: Ensure that the CNI service account exists in the CNI namespace: ```bash -kubectl get ServiceAccount linkerd-cni -n linkerd-cni +$ kubectl get ServiceAccount linkerd-cni -n linkerd-cni NAME SECRETS AGE 
linkerd-cni 1 45m ``` @@ -1282,7 +1282,7 @@ linkerd-cni 1 45m Also ensure you have permission to create ServiceAccount: ```bash -kubectl auth can-i create ServiceAccounts -n linkerd-cni +$ kubectl auth can-i create ServiceAccounts -n linkerd-cni yes ``` @@ -1299,7 +1299,7 @@ Example error: Ensure that the CNI daemonset exists in the CNI namespace: ```bash -kubectl get ds -n linkerd-cni +$ kubectl get ds -n linkerd-cni NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE linkerd-cni 1 1 1 1 1 beta.kubernetes.io/os=linux 14m ``` @@ -1307,7 +1307,7 @@ linkerd-cni 1 1 1 1 1 beta.kubernet Also ensure you have permission to create DaemonSets: ```bash -kubectl auth can-i create DaemonSets -n linkerd-cni +$ kubectl auth can-i create DaemonSets -n linkerd-cni yes ``` @@ -1324,7 +1324,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -kubectl get po -n linkerd-cni +$ kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s @@ -1334,7 +1334,7 @@ linkerd-cni-p5670 1/1 Running 0 9m25s Ensure that all pods have finished the deployment of the CNI config and binary: ```bash -kubectl logs linkerd-cni-rzp2q -n linkerd-cni +$ kubectl logs linkerd-cni-rzp2q -n linkerd-cni Wrote linkerd CNI binaries to /host/opt/cni/bin Created CNI config /host/etc/cni/net.d/10-kindnet.conflist Done configuring CNI. Sleep=true @@ -1362,7 +1362,7 @@ Make sure multicluster extension is correctly installed and that the `links.multicluster.linkerd.io` CRD is present. ```bash -kubectl get crds | grep multicluster +$ kubectl get crds | grep multicluster NAME CREATED AT links.multicluster.linkerd.io 2021-03-10T09:58:10Z ``` @@ -1441,7 +1441,7 @@ the rules section. 
Expected rules for `linkerd-service-mirror-access-local-resources` cluster role: ```bash -kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml +$ kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml kind: ClusterRole metadata: labels: @@ -1474,7 +1474,7 @@ rules: Expected rules for `linkerd-service-mirror-read-remote-creds` role: ```bash -kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml +$ kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml kind: Role metadata: labels: @@ -1507,7 +1507,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the controller pod with: ```bash -kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror +$ kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror NAME READY STATUS RESTARTS AGE linkerd-service-mirror-7bb8ff5967-zg265 2/2 Running 0 50m ``` @@ -1606,7 +1606,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoles exist: ```bash -kubectl get clusterroles | grep linkerd-viz +$ kubectl get clusterroles | grep linkerd-viz linkerd-linkerd-viz-metrics-api 2021-01-26T18:02:17Z linkerd-linkerd-viz-prometheus 2021-01-26T18:02:17Z linkerd-linkerd-viz-tap 2021-01-26T18:02:17Z @@ -1634,7 +1634,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoleBindings exist: ```bash -kubectl get clusterrolebindings | grep linkerd-viz +$ kubectl get clusterrolebindings | grep linkerd-viz linkerd-linkerd-viz-metrics-api ClusterRole/linkerd-linkerd-viz-metrics-api 18h linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 18h linkerd-linkerd-viz-tap ClusterRole/linkerd-linkerd-viz-tap 18h @@ -1646,7 +1646,7 @@ linkerd-linkerd-viz-web-check ClusterRole/linkerd-linke Also ensure you have permission 
to create ClusterRoleBindings: ```bash -kubectl auth can-i create clusterrolebindings +$ kubectl auth can-i create clusterrolebindings yes ``` @@ -1718,7 +1718,7 @@ requirements in the cluster: Ensure all the linkerd-viz pods are injected ```bash -kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1742,7 +1742,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-viz pods are running with 2/2 ```bash -kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1766,12 +1766,12 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the prometheus related resources are present and running correctly. ```bash -❯ kubectl -n linkerd-viz get deploy,cm | grep prometheus +$ kubectl -n linkerd-viz get deploy,cm | grep prometheus deployment.apps/prometheus 1/1 1 1 3m18s configmap/prometheus-config 1 3m18s -❯ kubectl get clusterRoleBindings | grep prometheus +$ kubectl get clusterRoleBindings | grep prometheus linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 3m37s -❯ kubectl get clusterRoles | grep prometheus +$ kubectl get clusterRoles | grep prometheus linkerd-linkerd-viz-prometheus 2021-02-26T06:03:11Zh ``` @@ -1787,7 +1787,7 @@ Example failure: Verify that the metrics API pod is running correctly ```bash -❯ kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE metrics-api-7bb8cb8489-cbq4m 2/2 Running 0 4m58s tap-injector-6b9bc6fc4-cgbr4 2/2 Running 0 4m56s @@ -1847,7 +1847,7 @@ Example failure: Ensure the linkerd-jaeger ServiceAccounts exist: ```bash -kubectl -n linkerd-jaeger get serviceaccounts +$ kubectl -n linkerd-jaeger get serviceaccounts NAME SECRETS AGE 
collector 1 23m jaeger 1 23m @@ -1857,7 +1857,7 @@ Also ensure you have permission to create ServiceAccounts in the linkerd-jaeger namespace: ```bash -kubectl -n linkerd-jaeger auth can-i create serviceaccounts +$ kubectl -n linkerd-jaeger auth can-i create serviceaccounts yes ``` @@ -1874,7 +1874,7 @@ Example failure: Ensure the Linkerd ConfigMap exists: ```bash -kubectl -n linkerd-jaeger get configmap/collector-config +$ kubectl -n linkerd-jaeger get configmap/collector-config NAME DATA AGE collector-config 1 61m ``` @@ -1882,7 +1882,7 @@ collector-config 1 61m Also ensure you have permission to create ConfigMaps: ```bash -kubectl -n linkerd-jaeger auth can-i create configmap +$ kubectl -n linkerd-jaeger auth can-i create configmap yes ``` @@ -1897,7 +1897,7 @@ yes Ensure all the jaeger pods are injected ```bash -kubectl -n linkerd-jaeger get pods +$ kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE collector-69cc44dfbc-rhpfg 2/2 Running 0 11s jaeger-6f98d5c979-scqlq 2/2 Running 0 11s @@ -1918,7 +1918,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-jaeger pods are running with 2/2 ```bash -kubectl -n linkerd-jaeger get pods +$ kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE jaeger-injector-548684d74b-bcq5h 2/2 Running 0 5s collector-69cc44dfbc-wqf6s 2/2 Running 0 5s @@ -2032,7 +2032,7 @@ linkerd-buoyant install | kubectl apply -f - Ensure that the cluster role exists: ```bash -kubectl get clusterrole buoyant-cloud-agent +$ kubectl get clusterrole buoyant-cloud-agent NAME CREATED AT buoyant-cloud-agent 2020-11-13T00:59:50Z ``` @@ -2040,7 +2040,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create ClusterRoles +$ kubectl auth can-i create ClusterRoles yes ``` @@ -2055,7 +2055,7 @@ yes Ensure that the cluster role binding exists: ```bash -kubectl get clusterrolebinding buoyant-cloud-agent +$ kubectl get 
clusterrolebinding buoyant-cloud-agent NAME ROLE AGE buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d ``` @@ -2063,7 +2063,7 @@ buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create ClusterRoleBindings +$ kubectl auth can-i create ClusterRoleBindings yes ``` @@ -2078,7 +2078,7 @@ yes Ensure that the service account exists: ```bash -kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent +$ kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent NAME SECRETS AGE buoyant-cloud-agent 1 301d ``` @@ -2086,7 +2086,7 @@ buoyant-cloud-agent 1 301d Also ensure you have permission to create ServiceAccounts: ```bash -kubectl -n buoyant-cloud auth can-i create ServiceAccount +$ kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2101,7 +2101,7 @@ yes Ensure that the secret exists: ```bash -kubectl -n buoyant-cloud get secret buoyant-cloud-id +$ kubectl -n buoyant-cloud get secret buoyant-cloud-id NAME TYPE DATA AGE buoyant-cloud-id Opaque 4 301d ``` @@ -2109,7 +2109,7 @@ buoyant-cloud-id Opaque 4 301d Also ensure you have permission to create ServiceAccounts: ```bash -kubectl -n buoyant-cloud auth can-i create ServiceAccount +$ kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2124,14 +2124,14 @@ yes Ensure the `buoyant-cloud-agent` Deployment exists: ```bash -kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent +$ kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent ``` If the Deployment does not exist, the `linkerd-buoyant` installation may be missing or incomplete. To reinstall the extension: ```bash -linkerd-buoyant install | kubectl apply -f - +$ linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running @@ -2147,7 +2147,7 @@ everything to start up. 
If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-agent` Deployment with: ```bash -kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent +$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 156m ``` @@ -2170,7 +2170,7 @@ Ensure the `buoyant-cloud-agent` pod is injected, the `READY` column should show `2/2`: ```bash -kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent +$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 161m ``` @@ -2189,7 +2189,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -linkerd-buoyant version +$ linkerd-buoyant version CLI version: v0.4.4 Agent version: v0.4.4 ``` @@ -2248,7 +2248,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-metrics` DaemonSet with: ```bash -kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 163m buoyant-cloud-metrics-q8jhj 2/2 Running 0 163m @@ -2274,7 +2274,7 @@ Ensure the `buoyant-cloud-metrics` pods are injected, the `READY` column should show `2/2`: ```bash -kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 166m buoyant-cloud-metrics-q8jhj 2/2 Running 0 166m @@ -2296,7 +2296,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' +$ kubectl -n buoyant-cloud get 
daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' {"app.kubernetes.io/name":"metrics","app.kubernetes.io/part-of":"buoyant-cloud","app.kubernetes.io/version":"v0.4.4"} ``` diff --git a/linkerd.io/content/2.11/tasks/troubleshooting.md b/linkerd.io/content/2.11/tasks/troubleshooting.md index 10edc026ad..7de77a6be6 100644 --- a/linkerd.io/content/2.11/tasks/troubleshooting.md +++ b/linkerd.io/content/2.11/tasks/troubleshooting.md @@ -314,7 +314,7 @@ Example failure: Ensure the Linkerd ClusterRoles exist: ```bash -kubectl get clusterroles | grep linkerd +$ kubectl get clusterroles | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -324,7 +324,7 @@ linkerd-policy 9d Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create clusterroles +$ kubectl auth can-i create clusterroles yes ``` @@ -341,7 +341,7 @@ Example failure: Ensure the Linkerd ClusterRoleBindings exist: ```bash -kubectl get clusterrolebindings | grep linkerd +$ kubectl get clusterrolebindings | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -351,7 +351,7 @@ linkerd-destination-policy 9d Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create clusterrolebindings +$ kubectl auth can-i create clusterrolebindings yes ``` @@ -368,7 +368,7 @@ Example failure: Ensure the Linkerd ServiceAccounts exist: ```bash -kubectl -n linkerd get serviceaccounts +$ kubectl -n linkerd get serviceaccounts NAME SECRETS AGE default 1 14m linkerd-destination 1 14m @@ -381,7 +381,7 @@ Also ensure you have permission to create ServiceAccounts in the Linkerd namespace: ```bash -kubectl -n linkerd auth can-i create serviceaccounts +$ kubectl -n linkerd auth can-i create serviceaccounts yes ``` @@ -398,7 +398,7 @@ Example failure: Ensure the Linkerd CRD exists: ```bash -kubectl get customresourcedefinitions +$ kubectl get 
customresourcedefinitions NAME CREATED AT serviceprofiles.linkerd.io 2019-04-25T21:47:31Z ``` @@ -406,7 +406,7 @@ serviceprofiles.linkerd.io 2019-04-25T21:47:31Z Also ensure you have permission to create CRDs: ```bash -kubectl auth can-i create customresourcedefinitions +$ kubectl auth can-i create customresourcedefinitions yes ``` @@ -423,14 +423,14 @@ Example failure: Ensure the Linkerd MutatingWebhookConfigurations exists: ```bash -kubectl get mutatingwebhookconfigurations | grep linkerd +$ kubectl get mutatingwebhookconfigurations | grep linkerd linkerd-proxy-injector-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create MutatingWebhookConfigurations: ```bash -kubectl auth can-i create mutatingwebhookconfigurations +$ kubectl auth can-i create mutatingwebhookconfigurations yes ``` @@ -447,14 +447,14 @@ Example failure: Ensure the Linkerd ValidatingWebhookConfiguration exists: ```bash -kubectl get validatingwebhookconfigurations | grep linkerd +$ kubectl get validatingwebhookconfigurations | grep linkerd linkerd-sp-validator-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create ValidatingWebhookConfigurations: ```bash -kubectl auth can-i create validatingwebhookconfigurations +$ kubectl auth can-i create validatingwebhookconfigurations yes ``` @@ -471,14 +471,14 @@ Example failure: Ensure the Linkerd PodSecurityPolicy exists: ```bash -kubectl get podsecuritypolicies | grep linkerd +$ kubectl get podsecuritypolicies | grep linkerd linkerd-linkerd-control-plane false NET_ADMIN,NET_RAW RunAsAny RunAsAny MustRunAs MustRunAs true configMap,emptyDir,secret,projected,downwardAPI,persistentVolumeClaim ``` Also ensure you have permission to create PodSecurityPolicies: ```bash -kubectl auth can-i create podsecuritypolicies +$ kubectl auth can-i create podsecuritypolicies yes ``` @@ -526,7 +526,7 @@ Example failure: Ensure the Linkerd ConfigMap exists: ```bash -kubectl -n linkerd get configmap/linkerd-config +$ 
kubectl -n linkerd get configmap/linkerd-config NAME DATA AGE linkerd-config 3 61m ``` @@ -534,7 +534,7 @@ linkerd-config 3 61m Also ensure you have permission to create ConfigMaps: ```bash -kubectl -n linkerd auth can-i create configmap +$ kubectl -n linkerd auth can-i create configmap yes ``` @@ -888,7 +888,7 @@ Example failure: Verify the state of the control plane pods with: ```bash -kubectl -n linkerd get po +$ kubectl -n linkerd get po NAME READY STATUS RESTARTS AGE linkerd-destination-5fd7b5d466-szgqm 2/2 Running 1 12m linkerd-identity-54df78c479-hbh5m 2/2 Running 0 12m @@ -1271,7 +1271,7 @@ Example error: Ensure that the linkerd-cni-config ConfigMap exists in the CNI namespace: ```bash -kubectl get cm linkerd-cni-config -n linkerd-cni +$ kubectl get cm linkerd-cni-config -n linkerd-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1279,7 +1279,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create ConfigMaps: ```bash -kubectl auth can-i create ConfigMaps +$ kubectl auth can-i create ConfigMaps yes ``` @@ -1296,7 +1296,7 @@ Example error: Ensure that the pod security policy exists: ```bash -kubectl get psp linkerd-linkerd-cni-cni +$ kubectl get psp linkerd-linkerd-cni-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1304,7 +1304,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create PodSecurityPolicies: ```bash -kubectl auth can-i create PodSecurityPolicies +$ kubectl auth can-i create PodSecurityPolicies yes ``` @@ -1321,7 +1321,7 @@ Example error: Ensure that the cluster role exists: ```bash -kubectl get clusterrole linkerd-cni +$ kubectl get clusterrole linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1329,7 
+1329,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create ClusterRoles +$ kubectl auth can-i create ClusterRoles yes ``` @@ -1346,7 +1346,7 @@ Example error: Ensure that the cluster role binding exists: ```bash -kubectl get clusterrolebinding linkerd-cni +$ kubectl get clusterrolebinding linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1354,7 +1354,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create ClusterRoleBindings +$ kubectl auth can-i create ClusterRoleBindings yes ``` @@ -1371,7 +1371,7 @@ Example error: Ensure that the role exists in the CNI namespace: ```bash -kubectl get role linkerd-cni -n linkerd-cni +$ kubectl get role linkerd-cni -n linkerd-cni NAME AGE linkerd-cni 52m ``` @@ -1379,7 +1379,7 @@ linkerd-cni 52m Also ensure you have permission to create Roles: ```bash -kubectl auth can-i create Roles -n linkerd-cni +$ kubectl auth can-i create Roles -n linkerd-cni yes ``` @@ -1396,7 +1396,7 @@ Example error: Ensure that the role binding exists in the CNI namespace: ```bash -kubectl get rolebinding linkerd-cni -n linkerd-cni +$ kubectl get rolebinding linkerd-cni -n linkerd-cni NAME AGE linkerd-cni 49m ``` @@ -1404,7 +1404,7 @@ linkerd-cni 49m Also ensure you have permission to create RoleBindings: ```bash -kubectl auth can-i create RoleBindings -n linkerd-cni +$ kubectl auth can-i create RoleBindings -n linkerd-cni yes ``` @@ -1421,7 +1421,7 @@ Example error: Ensure that the CNI service account exists in the CNI namespace: ```bash -kubectl get ServiceAccount linkerd-cni -n linkerd-cni +$ kubectl get ServiceAccount linkerd-cni -n linkerd-cni NAME SECRETS AGE linkerd-cni 1 45m ``` @@ -1429,7 +1429,7 @@ linkerd-cni 1 45m Also ensure you have permission to create ServiceAccount: ```bash -kubectl auth can-i create ServiceAccounts -n linkerd-cni +$ kubectl auth can-i create ServiceAccounts -n linkerd-cni yes ``` @@ -1446,7 +1446,7 @@ 
Example error: Ensure that the CNI daemonset exists in the CNI namespace: ```bash -kubectl get ds -n linkerd-cni +$ kubectl get ds -n linkerd-cni NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE linkerd-cni 1 1 1 1 1 beta.kubernetes.io/os=linux 14m ``` @@ -1454,7 +1454,7 @@ linkerd-cni 1 1 1 1 1 beta.kubernet Also ensure you have permission to create DaemonSets: ```bash -kubectl auth can-i create DaemonSets -n linkerd-cni +$ kubectl auth can-i create DaemonSets -n linkerd-cni yes ``` @@ -1471,7 +1471,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -kubectl get po -n linkerd-cni +$ kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s @@ -1481,7 +1481,7 @@ linkerd-cni-p5670 1/1 Running 0 9m25s Ensure that all pods have finished the deployment of the CNI config and binary: ```bash -kubectl logs linkerd-cni-rzp2q -n linkerd-cni +$ kubectl logs linkerd-cni-rzp2q -n linkerd-cni Wrote linkerd CNI binaries to /host/opt/cni/bin Created CNI config /host/etc/cni/net.d/10-kindnet.conflist Done configuring CNI. Sleep=true @@ -1509,7 +1509,7 @@ Make sure multicluster extension is correctly installed and that the `links.multicluster.linkerd.io` CRD is present. ```bash -kubectl get crds | grep multicluster +$ kubectl get crds | grep multicluster NAME CREATED AT links.multicluster.linkerd.io 2021-03-10T09:58:10Z ``` @@ -1588,7 +1588,7 @@ the rules section. Expected rules for `linkerd-service-mirror-access-local-resources` cluster role: ```bash -kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml +$ kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml kind: ClusterRole metadata: labels: @@ -1654,7 +1654,7 @@ everything to start up. 
If this is a permanent error, you'll want to validate the state of the controller pod with: ```bash -kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror +$ kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror NAME READY STATUS RESTARTS AGE linkerd-service-mirror-7bb8ff5967-zg265 2/2 Running 0 50m ``` @@ -1781,7 +1781,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoleBindings exist: ```bash -kubectl get clusterrolebindings | grep linkerd-viz +$ kubectl get clusterrolebindings | grep linkerd-viz linkerd-linkerd-viz-metrics-api ClusterRole/linkerd-linkerd-viz-metrics-api 18h linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 18h linkerd-linkerd-viz-tap ClusterRole/linkerd-linkerd-viz-tap 18h @@ -1793,7 +1793,7 @@ linkerd-linkerd-viz-web-check ClusterRole/linkerd-linke Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create clusterrolebindings +$ kubectl auth can-i create clusterrolebindings yes ``` @@ -1865,7 +1865,7 @@ requirements in the cluster: Ensure all the linkerd-viz pods are injected ```bash -kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1889,7 +1889,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-viz pods are running with 2/2 ```bash -kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1913,12 +1913,12 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the prometheus related resources are present and running correctly. 
```bash -❯ kubectl -n linkerd-viz get deploy,cm | grep prometheus +$ kubectl -n linkerd-viz get deploy,cm | grep prometheus deployment.apps/prometheus 1/1 1 1 3m18s configmap/prometheus-config 1 3m18s -❯ kubectl get clusterRoleBindings | grep prometheus +$ kubectl get clusterRoleBindings | grep prometheus linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 3m37s -❯ kubectl get clusterRoles | grep prometheus +$ kubectl get clusterRoles | grep prometheus linkerd-linkerd-viz-prometheus 2021-02-26T06:03:11Zh ``` @@ -1934,7 +1934,7 @@ Example failure: Verify that the metrics API pod is running correctly ```bash -❯ kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE metrics-api-7bb8cb8489-cbq4m 2/2 Running 0 4m58s tap-injector-6b9bc6fc4-cgbr4 2/2 Running 0 4m56s @@ -2021,7 +2021,7 @@ Example failure: Ensure the Linkerd ConfigMap exists: ```bash -kubectl -n linkerd-jaeger get configmap/collector-config +$ kubectl -n linkerd-jaeger get configmap/collector-config NAME DATA AGE collector-config 1 61m ``` @@ -2029,7 +2029,7 @@ collector-config 1 61m Also ensure you have permission to create ConfigMaps: ```bash -kubectl -n linkerd-jaeger auth can-i create configmap +$ kubectl -n linkerd-jaeger auth can-i create configmap yes ``` @@ -2044,7 +2044,7 @@ yes Ensure all the jaeger pods are injected ```bash -kubectl -n linkerd-jaeger get pods +$ kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE collector-69cc44dfbc-rhpfg 2/2 Running 0 11s jaeger-6f98d5c979-scqlq 2/2 Running 0 11s @@ -2065,7 +2065,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-jaeger pods are running with 2/2 ```bash -kubectl -n linkerd-jaeger get pods +$ kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE jaeger-injector-548684d74b-bcq5h 2/2 Running 0 5s collector-69cc44dfbc-wqf6s 2/2 Running 0 5s @@ -2179,7 +2179,7 @@ linkerd-buoyant install | kubectl apply -f - Ensure 
that the cluster role exists: ```bash -kubectl get clusterrole buoyant-cloud-agent +$ kubectl get clusterrole buoyant-cloud-agent NAME CREATED AT buoyant-cloud-agent 2020-11-13T00:59:50Z ``` @@ -2187,7 +2187,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create ClusterRoles +$ kubectl auth can-i create ClusterRoles yes ``` @@ -2202,7 +2202,7 @@ yes Ensure that the cluster role binding exists: ```bash -kubectl get clusterrolebinding buoyant-cloud-agent +$ kubectl get clusterrolebinding buoyant-cloud-agent NAME ROLE AGE buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d ``` @@ -2210,7 +2210,7 @@ buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create ClusterRoleBindings +$ kubectl auth can-i create ClusterRoleBindings yes ``` @@ -2225,7 +2225,7 @@ yes Ensure that the service account exists: ```bash -kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent +$ kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent NAME SECRETS AGE buoyant-cloud-agent 1 301d ``` @@ -2233,7 +2233,7 @@ buoyant-cloud-agent 1 301d Also ensure you have permission to create ServiceAccounts: ```bash -kubectl -n buoyant-cloud auth can-i create ServiceAccount +$ kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2248,7 +2248,7 @@ yes Ensure that the secret exists: ```bash -kubectl -n buoyant-cloud get secret buoyant-cloud-id +$ kubectl -n buoyant-cloud get secret buoyant-cloud-id NAME TYPE DATA AGE buoyant-cloud-id Opaque 4 301d ``` @@ -2256,7 +2256,7 @@ buoyant-cloud-id Opaque 4 301d Also ensure you have permission to create ServiceAccounts: ```bash -kubectl -n buoyant-cloud auth can-i create ServiceAccount +$ kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2271,14 +2271,14 @@ yes Ensure the `buoyant-cloud-agent` Deployment exists: ```bash -kubectl -n 
buoyant-cloud get deploy/buoyant-cloud-agent +$ kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent ``` If the Deployment does not exist, the `linkerd-buoyant` installation may be missing or incomplete. To reinstall the extension: ```bash -linkerd-buoyant install | kubectl apply -f - +$ linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running @@ -2294,7 +2294,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-agent` Deployment with: ```bash -kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent +$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 156m ``` @@ -2317,7 +2317,7 @@ Ensure the `buoyant-cloud-agent` pod is injected, the `READY` column should show `2/2`: ```bash -kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent +$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 161m ``` @@ -2336,7 +2336,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -linkerd-buoyant version +$ linkerd-buoyant version CLI version: v0.4.4 Agent version: v0.4.4 ``` @@ -2395,7 +2395,7 @@ everything to start up. 
If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-metrics` DaemonSet with: ```bash -kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 163m buoyant-cloud-metrics-q8jhj 2/2 Running 0 163m @@ -2443,7 +2443,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' +$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' {"app.kubernetes.io/name":"metrics","app.kubernetes.io/part-of":"buoyant-cloud","app.kubernetes.io/version":"v0.4.4"} ``` diff --git a/linkerd.io/content/2.12/tasks/troubleshooting.md b/linkerd.io/content/2.12/tasks/troubleshooting.md index b142ee66a9..ac0151b085 100644 --- a/linkerd.io/content/2.12/tasks/troubleshooting.md +++ b/linkerd.io/content/2.12/tasks/troubleshooting.md @@ -230,7 +230,7 @@ Example failure: Ensure the Linkerd ClusterRoles exist: ```bash -kubectl get clusterroles | grep linkerd +$ kubectl get clusterroles | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -240,7 +240,7 @@ linkerd-policy 9d Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create clusterroles +$ kubectl auth can-i create clusterroles yes ``` @@ -257,7 +257,7 @@ Example failure: Ensure the Linkerd ClusterRoleBindings exist: ```bash -kubectl get clusterrolebindings | grep linkerd +$ kubectl get clusterrolebindings | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -267,7 +267,7 @@ linkerd-destination-policy 9d Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create clusterrolebindings +$ kubectl auth 
can-i create clusterrolebindings yes ``` @@ -284,7 +284,7 @@ Example failure: Ensure the Linkerd ServiceAccounts exist: ```bash -kubectl -n linkerd get serviceaccounts +$ kubectl -n linkerd get serviceaccounts NAME SECRETS AGE default 1 14m linkerd-destination 1 14m @@ -297,7 +297,7 @@ Also ensure you have permission to create ServiceAccounts in the Linkerd namespace: ```bash -kubectl -n linkerd auth can-i create serviceaccounts +$ kubectl -n linkerd auth can-i create serviceaccounts yes ``` @@ -314,7 +314,7 @@ Example failure: Ensure the Linkerd CRD exists: ```bash -kubectl get customresourcedefinitions +$ kubectl get customresourcedefinitions NAME CREATED AT serviceprofiles.linkerd.io 2019-04-25T21:47:31Z ``` @@ -322,7 +322,7 @@ serviceprofiles.linkerd.io 2019-04-25T21:47:31Z Also ensure you have permission to create CRDs: ```bash -kubectl auth can-i create customresourcedefinitions +$ kubectl auth can-i create customresourcedefinitions yes ``` @@ -339,14 +339,14 @@ Example failure: Ensure the Linkerd MutatingWebhookConfigurations exists: ```bash -kubectl get mutatingwebhookconfigurations | grep linkerd +$ kubectl get mutatingwebhookconfigurations | grep linkerd linkerd-proxy-injector-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create MutatingWebhookConfigurations: ```bash -kubectl auth can-i create mutatingwebhookconfigurations +$ kubectl auth can-i create mutatingwebhookconfigurations yes ``` @@ -363,14 +363,14 @@ Example failure: Ensure the Linkerd ValidatingWebhookConfiguration exists: ```bash -kubectl get validatingwebhookconfigurations | grep linkerd +$ kubectl get validatingwebhookconfigurations | grep linkerd linkerd-sp-validator-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create ValidatingWebhookConfigurations: ```bash -kubectl auth can-i create validatingwebhookconfigurations +$ kubectl auth can-i create validatingwebhookconfigurations yes ``` @@ -418,7 +418,7 @@ Example failure: Ensure the 
Linkerd ConfigMap exists: ```bash -kubectl -n linkerd get configmap/linkerd-config +$ kubectl -n linkerd get configmap/linkerd-config NAME DATA AGE linkerd-config 3 61m ``` @@ -426,7 +426,7 @@ linkerd-config 3 61m Also ensure you have permission to create ConfigMaps: ```bash -kubectl -n linkerd auth can-i create configmap +$ kubectl -n linkerd auth can-i create configmap yes ``` @@ -780,7 +780,7 @@ Example failure: Verify the state of the control plane pods with: ```bash -kubectl -n linkerd get po +$ kubectl -n linkerd get po NAME READY STATUS RESTARTS AGE linkerd-destination-5fd7b5d466-szgqm 2/2 Running 1 12m linkerd-identity-54df78c479-hbh5m 2/2 Running 0 12m @@ -1045,7 +1045,7 @@ Ensure the kube-system namespace has the `config.linkerd.io/admission-webhooks:disabled` label: ```bash -kubectl get namespace kube-system -oyaml +$ kubectl get namespace kube-system -oyaml kind: Namespace apiVersion: v1 metadata: @@ -1281,7 +1281,7 @@ Make sure multicluster extension is correctly installed and that the `links.multicluster.linkerd.io` CRD is present. ```bash -kubectl get crds | grep multicluster +$ kubectl get crds | grep multicluster NAME CREATED AT links.multicluster.linkerd.io 2021-03-10T09:58:10Z ``` @@ -1360,7 +1360,7 @@ the rules section. Expected rules for `linkerd-service-mirror-access-local-resources` cluster role: ```bash -kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml +$ kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml kind: ClusterRole metadata: labels: @@ -1393,7 +1393,7 @@ rules: Expected rules for `linkerd-service-mirror-read-remote-creds` role: ```bash -kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml +$ kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml kind: Role metadata: labels: @@ -1426,7 +1426,7 @@ everything to start up. 
If this is a permanent error, you'll want to validate the state of the controller pod with: ```bash -kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror +$ kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror NAME READY STATUS RESTARTS AGE linkerd-service-mirror-7bb8ff5967-zg265 2/2 Running 0 50m ``` @@ -1544,7 +1544,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoles exist: ```bash -kubectl get clusterroles | grep linkerd-viz +$ kubectl get clusterroles | grep linkerd-viz linkerd-linkerd-viz-metrics-api 2021-01-26T18:02:17Z linkerd-linkerd-viz-prometheus 2021-01-26T18:02:17Z linkerd-linkerd-viz-tap 2021-01-26T18:02:17Z @@ -1572,7 +1572,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoleBindings exist: ```bash -kubectl get clusterrolebindings | grep linkerd-viz +$ kubectl get clusterrolebindings | grep linkerd-viz linkerd-linkerd-viz-metrics-api ClusterRole/linkerd-linkerd-viz-metrics-api 18h linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 18h linkerd-linkerd-viz-tap ClusterRole/linkerd-linkerd-viz-tap 18h @@ -1584,7 +1584,7 @@ linkerd-linkerd-viz-web-check ClusterRole/linkerd-linke Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create clusterrolebindings +$ kubectl auth can-i create clusterrolebindings yes ``` @@ -1673,7 +1673,7 @@ requirements in the cluster: Ensure all the linkerd-viz pods are injected ```bash -kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1697,7 +1697,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-viz pods are running with 2/2 ```bash -kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 
2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1721,12 +1721,12 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the prometheus related resources are present and running correctly. ```bash -❯ kubectl -n linkerd-viz get deploy,cm | grep prometheus +$ kubectl -n linkerd-viz get deploy,cm | grep prometheus deployment.apps/prometheus 1/1 1 1 3m18s configmap/prometheus-config 1 3m18s -❯ kubectl get clusterRoleBindings | grep prometheus +$ kubectl get clusterRoleBindings | grep prometheus linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 3m37s -❯ kubectl get clusterRoles | grep prometheus +$ kubectl get clusterRoles | grep prometheus linkerd-linkerd-viz-prometheus 2021-02-26T06:03:11Zh ``` @@ -1742,7 +1742,7 @@ Example failure: Verify that the metrics API pod is running correctly ```bash -❯ kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE metrics-api-7bb8cb8489-cbq4m 2/2 Running 0 4m58s tap-injector-6b9bc6fc4-cgbr4 2/2 Running 0 4m56s @@ -1880,7 +1880,7 @@ versions in sync by updating either the CLI or linkerd-jaeger as necessary. 
Ensure all the jaeger pods are injected ```bash -kubectl -n linkerd-jaeger get pods +$ kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE collector-69cc44dfbc-rhpfg 2/2 Running 0 11s jaeger-6f98d5c979-scqlq 2/2 Running 0 11s @@ -1901,7 +1901,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-jaeger pods are running with 2/2 ```bash -kubectl -n linkerd-jaeger get pods +$ kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE jaeger-injector-548684d74b-bcq5h 2/2 Running 0 5s collector-69cc44dfbc-wqf6s 2/2 Running 0 5s @@ -1950,7 +1950,7 @@ Ensure you can connect to the Linkerd Buoyant version check endpoint from the environment the `linkerd` cli is running: ```bash -curl https://buoyant.cloud/version.json +$ curl https://buoyant.cloud/version.json {"linkerd-buoyant":"v0.4.4"} ``` @@ -2015,7 +2015,7 @@ linkerd-buoyant install | kubectl apply -f - Ensure that the cluster role exists: ```bash -kubectl get clusterrole buoyant-cloud-agent +$ kubectl get clusterrole buoyant-cloud-agent NAME CREATED AT buoyant-cloud-agent 2020-11-13T00:59:50Z ``` @@ -2023,7 +2023,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create ClusterRoles +$ kubectl auth can-i create clusterroles yes ``` @@ -2038,7 +2038,7 @@ yes Ensure that the cluster role binding exists: ```bash -kubectl get clusterrolebinding buoyant-cloud-agent +$ kubectl get clusterrolebinding buoyant-cloud-agent NAME ROLE AGE buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d ``` @@ -2046,7 +2046,7 @@ buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create ClusterRoleBindings +$ kubectl auth can-i create ClusterRoleBindings yes ``` @@ -2061,7 +2061,7 @@ yes Ensure that the service account exists: ```bash -kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent +$ 
kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent NAME SECRETS AGE buoyant-cloud-agent 1 301d ``` @@ -2069,7 +2069,7 @@ buoyant-cloud-agent 1 301d Also ensure you have permission to create ServiceAccounts: ```bash -kubectl -n buoyant-cloud auth can-i create ServiceAccount +$ kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2084,7 +2084,7 @@ yes Ensure that the secret exists: ```bash -kubectl -n buoyant-cloud get secret buoyant-cloud-id +$ kubectl -n buoyant-cloud get secret buoyant-cloud-id NAME TYPE DATA AGE buoyant-cloud-id Opaque 4 301d ``` @@ -2092,7 +2092,7 @@ buoyant-cloud-id Opaque 4 301d Also ensure you have permission to create ServiceAccounts: ```bash -kubectl -n buoyant-cloud auth can-i create ServiceAccount +$ kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2107,14 +2107,14 @@ yes Ensure the `buoyant-cloud-agent` Deployment exists: ```bash -kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent +$ kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent ``` If the Deployment does not exist, the `linkerd-buoyant` installation may be missing or incomplete. To reinstall the extension: ```bash -linkerd-buoyant install | kubectl apply -f - +$ linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running @@ -2130,7 +2130,7 @@ everything to start up. 
If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-agent` Deployment with: ```bash -kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent +$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 156m ``` @@ -2153,7 +2153,7 @@ Ensure the `buoyant-cloud-agent` pod is injected, the `READY` column should show `2/2`: ```bash -kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent +$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 161m ``` @@ -2172,7 +2172,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -linkerd-buoyant version +$ linkerd-buoyant version CLI version: v0.4.4 Agent version: v0.4.4 ``` @@ -2180,7 +2180,7 @@ Agent version: v0.4.4 To update to the latest version: ```bash -linkerd-buoyant install | kubectl apply -f - +$ linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running a single pod @@ -2194,7 +2194,7 @@ linkerd-buoyant install | kubectl apply -f - `buoyant-cloud-agent` should run as a singleton. Check for other pods: ```bash -kubectl get po -A --selector app=buoyant-cloud-agent +$ kubectl get po -A --selector app=buoyant-cloud-agent ``` ### √ buoyant-cloud-metrics DaemonSet exists @@ -2208,14 +2208,14 @@ kubectl get po -A --selector app=buoyant-cloud-agent Ensure the `buoyant-cloud-metrics` DaemonSet exists: ```bash -kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics ``` If the DaemonSet does not exist, the `linkerd-buoyant` installation may be missing or incomplete. 
To reinstall the extension: ```bash -linkerd-buoyant install | kubectl apply -f - +$ linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-metrics DaemonSet is running @@ -2231,7 +2231,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-metrics` DaemonSet with: ```bash -kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 163m buoyant-cloud-metrics-q8jhj 2/2 Running 0 163m @@ -2257,7 +2257,7 @@ Ensure the `buoyant-cloud-metrics` pods are injected, the `READY` column should show `2/2`: ```bash -kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 166m buoyant-cloud-metrics-q8jhj 2/2 Running 0 166m @@ -2279,7 +2279,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' +$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' {"app.kubernetes.io/name":"metrics","app.kubernetes.io/part-of":"buoyant-cloud","app.kubernetes.io/version":"v0.4.4"} ``` diff --git a/linkerd.io/content/2.12/tasks/uninstall-multicluster.md b/linkerd.io/content/2.12/tasks/uninstall-multicluster.md index 71b3761399..0b850fe583 100644 --- a/linkerd.io/content/2.12/tasks/uninstall-multicluster.md +++ b/linkerd.io/content/2.12/tasks/uninstall-multicluster.md @@ -24,7 +24,6 @@ up correctly and are not left orphaned. 
To unlink, run the `linkerd multicluster unlink` command and pipe the output to `kubectl delete`: - ```bash linkerd multicluster unlink --cluster-name=target | kubectl delete -f - ``` diff --git a/linkerd.io/content/2.13/tasks/getting-per-route-metrics.md b/linkerd.io/content/2.13/tasks/getting-per-route-metrics.md index 9f66470e28..a38bdbea89 100644 --- a/linkerd.io/content/2.13/tasks/getting-per-route-metrics.md +++ b/linkerd.io/content/2.13/tasks/getting-per-route-metrics.md @@ -24,7 +24,7 @@ per-route authorization. You can view per-route metrics in the CLI by running `linkerd viz routes`: ```bash -linkerd viz routes svc/webapp +$ linkerd viz routes svc/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 GET / webapp 100.00% 0.6rps 25ms 30ms 30ms GET /authors/{id} webapp 100.00% 0.6rps 22ms 29ms 30ms @@ -44,7 +44,7 @@ specified in your service profile will end up there. It is also possible to look the metrics up by other resource types, such as: ```bash -linkerd viz routes deploy/webapp +$ linkerd viz routes deploy/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 [DEFAULT] kubernetes 0.00% 0.0rps 0ms 0ms 0ms GET / webapp 100.00% 0.5rps 27ms 38ms 40ms @@ -63,7 +63,7 @@ Then, it is possible to filter all the way down to requests going from a specific resource to other services: ```bash -linkerd viz routes deploy/webapp --to svc/books +$ linkerd viz routes deploy/webapp --to svc/books ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 DELETE /books/{id}.json books 100.00% 0.5rps 18ms 29ms 30ms GET /books.json books 100.00% 1.1rps 7ms 12ms 18ms @@ -86,7 +86,7 @@ linkerd viz tap deploy/webapp -o wide | grep req A sample output is: ```bash -req id=3:1 proxy=in src=10.4.0.14:58562 dst=10.4.1.4:7000 tls=disabled :method=POST :authority=webapp:7000 :path=/books/24783/edit src_res=deploy/traffic src_ns=default dst_res=deploy/webapp dst_ns=default rt_route=POST /books/{id}/edit +req id=3:1 proxy=in src=10.4.0.14:58562
dst=10.4.1.4:7000 tls=disabled :method=POST :authority=webapp:7000 :path=/books/24783/edit src_res=deploy/traffic src_ns=default dst_res=deploy/webapp dst_ns=default rt_route=POST /books/{id}/edit ``` This will select only the requests observed and show the `:authority` and diff --git a/linkerd.io/content/2.13/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.13/tasks/multicluster-using-statefulsets.md index c720c09563..1e4bee4013 100644 --- a/linkerd.io/content/2.13/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.13/tasks/multicluster-using-statefulsets.md @@ -48,8 +48,8 @@ The first step is to clone the demo repository on your local machine. ```sh # clone example repository -git clone git@github.com:mateiidavid/l2d-k3d-statefulset.git -cd l2d-k3d-statefulset +$ git clone git@github.com:mateiidavid/l2d-k3d-statefulset.git +$ cd l2d-k3d-statefulset ``` The second step consists of creating two `k3d` clusters named `east` and `west`, @@ -60,10 +60,10 @@ everything. ```sh # create k3d clusters -./create.sh +$ ./create.sh # list the clusters -k3d cluster list +$ k3d cluster list NAME SERVERS AGENTS LOADBALANCER east 1/1 0/0 true west 1/1 0/0 true @@ -78,10 +78,10 @@ provided scripts, but feel free to have a look! ```sh # Install Linkerd and multicluster, output to check should be a success -./install.sh +$ ./install.sh # Next, link the two clusters together -./link.sh +$ ./link.sh ``` Perfect! If you've made it this far with no errors, then it's a good sign. In @@ -101,17 +101,17 @@ communication. 
First, we will deploy our pods and services: ```sh # deploy services and mesh namespaces -./deploy.sh +$ ./deploy.sh # verify both clusters # # verify east -kubectl --context=k3d-east get pods +$ kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 7s # verify west has headless service -kubectl --context=k3d-west get services +$ kubectl --context=k3d-west get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 10m nginx-svc ClusterIP None 80/TCP 8s @@ -119,7 +119,7 @@ nginx-svc ClusterIP None 80/TCP 8s # verify west has statefulset # # this may take a while to come up -kubectl --context=k3d-west get pods +$ kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 53s nginx-set-1 2/2 Running 0 43s @@ -130,7 +130,7 @@ Before we go further, let's have a look at the endpoints object for the `nginx-svc`: ```sh -kubectl --context=k3d-west get endpoints nginx-svc -o yaml +$ kubectl --context=k3d-west get endpoints nginx-svc -o yaml ... subsets: - addresses: @@ -170,15 +170,15 @@ would get an answer back. We can test this out by applying the curl pod to the `west` cluster: ```sh -kubectl --context=k3d-west apply -f east/curl.yml -kubectl --context=k3d-west get pods +$ kubectl --context=k3d-west apply -f east/curl.yml +$ kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 5m8s nginx-set-1 2/2 Running 0 4m58s nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s -kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh +$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh /# prompt for curl pod ``` @@ -186,7 +186,7 @@ If we now curl one of these instances, we will get back a response. 
```sh # exec'd on the pod -/ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local +/ $ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local " @@ -218,10 +218,10 @@ Now, let's do the same, but this time from the `east` cluster. We will first export the service. ```sh -kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" +$ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled -kubectl --context=k3d-east get services +$ kubectl --context=k3d-east get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 20h nginx-svc-west ClusterIP None 80/TCP 29s @@ -235,7 +235,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname will point to one of the services we see above: ```sh -kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml +$ kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml subsets: - addresses: - hostname: nginx-set-0 @@ -251,17 +251,17 @@ cluster (`west`), will be mirrored as a clusterIP service. We will see in a second why this matters. ```sh -kubectl --context=k3d-east get pods +$ kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh +$ kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. # -/ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local +/ $ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local @@ -329,8 +329,8 @@ validation. 
To clean-up, you can remove both clusters entirely using the k3d CLI: ```sh -k3d cluster delete east +$ k3d cluster delete east cluster east deleted -k3d cluster delete west +$ k3d cluster delete west cluster west deleted ``` diff --git a/linkerd.io/content/2.13/tasks/restricting-access.md b/linkerd.io/content/2.13/tasks/restricting-access.md index 38ebdaeb3d..0b0b0c94b7 100644 --- a/linkerd.io/content/2.13/tasks/restricting-access.md +++ b/linkerd.io/content/2.13/tasks/restricting-access.md @@ -21,9 +21,9 @@ haven't already done this. Inject and install the Emojivoto application: ```bash -linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - +$ linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - ... -linkerd check -n emojivoto --proxy -o short +$ linkerd check -n emojivoto --proxy -o short ... ``` diff --git a/linkerd.io/content/2.13/tasks/securing-linkerd-tap.md b/linkerd.io/content/2.13/tasks/securing-linkerd-tap.md index 639f81692f..8a802c890c 100644 --- a/linkerd.io/content/2.13/tasks/securing-linkerd-tap.md +++ b/linkerd.io/content/2.13/tasks/securing-linkerd-tap.md @@ -60,7 +60,7 @@ kubectl auth can-i watch deployments.tap.linkerd.io -n emojivoto --as $(whoami) You can also use the Linkerd CLI's `--as` flag to confirm: ```bash -linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) Cannot connect to Linkerd Viz: namespaces is forbidden: User "XXXX" cannot list resource "namespaces" in API group "" at the cluster scope Validate the install with: linkerd viz check ... 
@@ -77,7 +77,7 @@ To enable tap access to all resources in all namespaces, you may bind your user to the `linkerd-linkerd-tap-admin` ClusterRole, installed by default: ```bash -kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin +$ kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin Name: linkerd-linkerd-viz-tap-admin Labels: component=tap linkerd.io/extension=viz @@ -109,7 +109,7 @@ kubectl create clusterrolebinding \ You can verify you now have tap access with: ```bash -linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) req id=3:0 proxy=in src=10.244.0.1:37392 dst=10.244.0.13:9996 tls=not_provided_by_remote :method=GET :authority=10.244.0.13:9996 :path=/ping ... ``` @@ -143,14 +143,14 @@ Because GCloud provides this additional level of access, there are cases where not. To validate this, check whether your GCloud user has Tap access: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces yes ``` And then validate whether your RBAC user has Tap access: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) no - no RBAC policy matched ``` @@ -187,14 +187,14 @@ privileges necessary to tap resources. 
To confirm: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web yes ``` This access is enabled via a `linkerd-linkerd-viz-web-admin` ClusterRoleBinding: ```bash -kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin +$ kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin Name: linkerd-linkerd-viz-web-admin Labels: component=web linkerd.io/extensions=viz @@ -227,6 +227,6 @@ kubectl delete clusterrolebindings/linkerd-linkerd-viz-web-admin To confirm: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web no ``` diff --git a/linkerd.io/content/2.13/tasks/troubleshooting.md b/linkerd.io/content/2.13/tasks/troubleshooting.md index b142ee66a9..ffe22e5153 100644 --- a/linkerd.io/content/2.13/tasks/troubleshooting.md +++ b/linkerd.io/content/2.13/tasks/troubleshooting.md @@ -230,7 +230,7 @@ Example failure: Ensure the Linkerd ClusterRoles exist: ```bash -kubectl get clusterroles | grep linkerd +$ kubectl get clusterroles | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -240,7 +240,7 @@ linkerd-policy 9d Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create clusterroles +$ kubectl auth can-i create clusterroles yes ``` @@ -257,7 +257,7 @@ Example failure: Ensure the Linkerd ClusterRoleBindings exist: ```bash -kubectl get clusterrolebindings | grep linkerd +$ kubectl get clusterrolebindings | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -267,7 +267,7 @@ linkerd-destination-policy 9d Also ensure you have permission to create ClusterRoleBindings: ```bash 
-kubectl auth can-i create clusterrolebindings +$ kubectl auth can-i create clusterrolebindings yes ``` @@ -284,7 +284,7 @@ Example failure: Ensure the Linkerd ServiceAccounts exist: ```bash -kubectl -n linkerd get serviceaccounts +$ kubectl -n linkerd get serviceaccounts NAME SECRETS AGE default 1 14m linkerd-destination 1 14m @@ -297,7 +297,7 @@ Also ensure you have permission to create ServiceAccounts in the Linkerd namespace: ```bash -kubectl -n linkerd auth can-i create serviceaccounts +$ kubectl -n linkerd auth can-i create serviceaccounts yes ``` @@ -314,7 +314,7 @@ Example failure: Ensure the Linkerd CRD exists: ```bash -kubectl get customresourcedefinitions +$ kubectl get customresourcedefinitions NAME CREATED AT serviceprofiles.linkerd.io 2019-04-25T21:47:31Z ``` @@ -322,7 +322,7 @@ serviceprofiles.linkerd.io 2019-04-25T21:47:31Z Also ensure you have permission to create CRDs: ```bash -kubectl auth can-i create customresourcedefinitions +$ kubectl auth can-i create customresourcedefinitions yes ``` @@ -339,14 +339,14 @@ Example failure: Ensure the Linkerd MutatingWebhookConfigurations exists: ```bash -kubectl get mutatingwebhookconfigurations | grep linkerd +$ kubectl get mutatingwebhookconfigurations | grep linkerd linkerd-proxy-injector-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create MutatingWebhookConfigurations: ```bash -kubectl auth can-i create mutatingwebhookconfigurations +$ kubectl auth can-i create mutatingwebhookconfigurations yes ``` @@ -363,14 +363,14 @@ Example failure: Ensure the Linkerd ValidatingWebhookConfiguration exists: ```bash -kubectl get validatingwebhookconfigurations | grep linkerd +$ kubectl get validatingwebhookconfigurations | grep linkerd linkerd-sp-validator-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create ValidatingWebhookConfigurations: ```bash -kubectl auth can-i create validatingwebhookconfigurations +$ kubectl auth can-i create 
validatingwebhookconfigurations yes ``` @@ -418,7 +418,7 @@ Example failure: Ensure the Linkerd ConfigMap exists: ```bash -kubectl -n linkerd get configmap/linkerd-config +$ kubectl -n linkerd get configmap/linkerd-config NAME DATA AGE linkerd-config 3 61m ``` @@ -426,7 +426,7 @@ linkerd-config 3 61m Also ensure you have permission to create ConfigMaps: ```bash -kubectl -n linkerd auth can-i create configmap +$ kubectl -n linkerd auth can-i create configmap yes ``` @@ -780,7 +780,7 @@ Example failure: Verify the state of the control plane pods with: ```bash -kubectl -n linkerd get po +$ kubectl -n linkerd get po NAME READY STATUS RESTARTS AGE linkerd-destination-5fd7b5d466-szgqm 2/2 Running 1 12m linkerd-identity-54df78c479-hbh5m 2/2 Running 0 12m @@ -1281,7 +1281,7 @@ Make sure multicluster extension is correctly installed and that the `links.multicluster.linkerd.io` CRD is present. ```bash -kubectl get crds | grep multicluster +$ kubectl get crds | grep multicluster NAME CREATED AT links.multicluster.linkerd.io 2021-03-10T09:58:10Z ``` @@ -1360,7 +1360,7 @@ the rules section. Expected rules for `linkerd-service-mirror-access-local-resources` cluster role: ```bash -kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml +$ kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml kind: ClusterRole metadata: labels: @@ -1426,7 +1426,7 @@ everything to start up. 
If this is a permanent error, you'll want to validate the state of the controller pod with: ```bash -kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror +$ kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror NAME READY STATUS RESTARTS AGE linkerd-service-mirror-7bb8ff5967-zg265 2/2 Running 0 50m ``` @@ -1544,7 +1544,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoles exist: ```bash -kubectl get clusterroles | grep linkerd-viz +$ kubectl get clusterroles | grep linkerd-viz linkerd-linkerd-viz-metrics-api 2021-01-26T18:02:17Z linkerd-linkerd-viz-prometheus 2021-01-26T18:02:17Z linkerd-linkerd-viz-tap 2021-01-26T18:02:17Z @@ -1572,7 +1572,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoleBindings exist: ```bash -kubectl get clusterrolebindings | grep linkerd-viz +$ kubectl get clusterrolebindings | grep linkerd-viz linkerd-linkerd-viz-metrics-api ClusterRole/linkerd-linkerd-viz-metrics-api 18h linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 18h linkerd-linkerd-viz-tap ClusterRole/linkerd-linkerd-viz-tap 18h @@ -1584,7 +1584,7 @@ linkerd-linkerd-viz-web-check ClusterRole/linkerd-linke Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create clusterrolebindings +$ kubectl auth can-i create clusterrolebindings yes ``` @@ -1673,7 +1673,7 @@ requirements in the cluster: Ensure all the linkerd-viz pods are injected ```bash -kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1697,7 +1697,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-viz pods are running with 2/2 ```bash -kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 
2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1721,12 +1721,12 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the prometheus related resources are present and running correctly. ```bash -❯ kubectl -n linkerd-viz get deploy,cm | grep prometheus +$ kubectl -n linkerd-viz get deploy,cm | grep prometheus deployment.apps/prometheus 1/1 1 1 3m18s configmap/prometheus-config 1 3m18s -❯ kubectl get clusterRoleBindings | grep prometheus +$ kubectl get clusterRoleBindings | grep prometheus linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 3m37s -❯ kubectl get clusterRoles | grep prometheus +$ kubectl get clusterRoles | grep prometheus linkerd-linkerd-viz-prometheus 2021-02-26T06:03:11Zh ``` @@ -1742,7 +1742,7 @@ Example failure: Verify that the metrics API pod is running correctly ```bash -❯ kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE metrics-api-7bb8cb8489-cbq4m 2/2 Running 0 4m58s tap-injector-6b9bc6fc4-cgbr4 2/2 Running 0 4m56s @@ -1880,7 +1880,7 @@ versions in sync by updating either the CLI or linkerd-jaeger as necessary. 
Ensure all the jaeger pods are injected ```bash -kubectl -n linkerd-jaeger get pods +$ kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE collector-69cc44dfbc-rhpfg 2/2 Running 0 11s jaeger-6f98d5c979-scqlq 2/2 Running 0 11s @@ -1901,7 +1901,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-jaeger pods are running with 2/2 ```bash -kubectl -n linkerd-jaeger get pods +$ kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE jaeger-injector-548684d74b-bcq5h 2/2 Running 0 5s collector-69cc44dfbc-wqf6s 2/2 Running 0 5s @@ -1950,7 +1950,7 @@ Ensure you can connect to the Linkerd Buoyant version check endpoint from the environment the `linkerd` cli is running: ```bash -curl https://buoyant.cloud/version.json +$ curl https://buoyant.cloud/version.json {"linkerd-buoyant":"v0.4.4"} ``` @@ -2015,7 +2015,7 @@ linkerd-buoyant install | kubectl apply -f - Ensure that the cluster role exists: ```bash -kubectl get clusterrole buoyant-cloud-agent +$ kubectl get clusterrole buoyant-cloud-agent NAME CREATED AT buoyant-cloud-agent 2020-11-13T00:59:50Z ``` @@ -2023,7 +2023,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create ClusterRoles +$ kubectl auth can-i create clusterroles yes ``` @@ -2038,7 +2038,7 @@ yes Ensure that the cluster role binding exists: ```bash -kubectl get clusterrolebinding buoyant-cloud-agent +$ kubectl get clusterrolebinding buoyant-cloud-agent NAME ROLE AGE buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d ``` @@ -2046,7 +2046,7 @@ buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create ClusterRoleBindings +$ kubectl auth can-i create ClusterRoleBindings yes ``` @@ -2061,7 +2061,7 @@ yes Ensure that the service account exists: ```bash -kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent +$ 
kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent NAME SECRETS AGE buoyant-cloud-agent 1 301d ``` @@ -2069,7 +2069,7 @@ buoyant-cloud-agent 1 301d Also ensure you have permission to create ServiceAccounts: ```bash -kubectl -n buoyant-cloud auth can-i create ServiceAccount +$ kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2084,7 +2084,7 @@ yes Ensure that the secret exists: ```bash -kubectl -n buoyant-cloud get secret buoyant-cloud-id +$ kubectl -n buoyant-cloud get secret buoyant-cloud-id NAME TYPE DATA AGE buoyant-cloud-id Opaque 4 301d ``` @@ -2092,7 +2092,7 @@ buoyant-cloud-id Opaque 4 301d Also ensure you have permission to create ServiceAccounts: ```bash -kubectl -n buoyant-cloud auth can-i create ServiceAccount +$ kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2107,14 +2107,14 @@ yes Ensure the `buoyant-cloud-agent` Deployment exists: ```bash -kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent +$ kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent ``` If the Deployment does not exist, the `linkerd-buoyant` installation may be missing or incomplete. To reinstall the extension: ```bash -linkerd-buoyant install | kubectl apply -f - +$ linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running @@ -2130,7 +2130,7 @@ everything to start up. 
If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-agent` Deployment with: ```bash -kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent +$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 156m ``` @@ -2153,7 +2153,7 @@ Ensure the `buoyant-cloud-agent` pod is injected, the `READY` column should show `2/2`: ```bash -kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent +$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 161m ``` @@ -2172,7 +2172,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -linkerd-buoyant version +$ linkerd-buoyant version CLI version: v0.4.4 Agent version: v0.4.4 ``` @@ -2180,7 +2180,7 @@ Agent version: v0.4.4 To update to the latest version: ```bash -linkerd-buoyant install | kubectl apply -f - +$ linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running a single pod @@ -2194,7 +2194,7 @@ linkerd-buoyant install | kubectl apply -f - `buoyant-cloud-agent` should run as a singleton. Check for other pods: ```bash -kubectl get po -A --selector app=buoyant-cloud-agent +$ kubectl get po -A --selector app=buoyant-cloud-agent ``` ### √ buoyant-cloud-metrics DaemonSet exists @@ -2208,14 +2208,14 @@ kubectl get po -A --selector app=buoyant-cloud-agent Ensure the `buoyant-cloud-metrics` DaemonSet exists: ```bash -kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics ``` If the DaemonSet does not exist, the `linkerd-buoyant` installation may be missing or incomplete. 
To reinstall the extension: ```bash -linkerd-buoyant install | kubectl apply -f - +$ linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-metrics DaemonSet is running @@ -2231,7 +2231,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-metrics` DaemonSet with: ```bash -kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 163m buoyant-cloud-metrics-q8jhj 2/2 Running 0 163m @@ -2257,7 +2257,7 @@ Ensure the `buoyant-cloud-metrics` pods are injected, the `READY` column should show `2/2`: ```bash -kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 166m buoyant-cloud-metrics-q8jhj 2/2 Running 0 166m @@ -2279,7 +2279,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' +$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' {"app.kubernetes.io/name":"metrics","app.kubernetes.io/part-of":"buoyant-cloud","app.kubernetes.io/version":"v0.4.4"} ``` diff --git a/linkerd.io/content/2.13/tasks/uninstall-multicluster.md b/linkerd.io/content/2.13/tasks/uninstall-multicluster.md index 0b850fe583..3f67abbc57 100644 --- a/linkerd.io/content/2.13/tasks/uninstall-multicluster.md +++ b/linkerd.io/content/2.13/tasks/uninstall-multicluster.md @@ -25,7 +25,7 @@ To unlink, run the `linkerd multicluster unlink` command and pipe the output to `kubectl delete`: ```bash -linkerd multicluster unlink --cluster-name=target | kubectl delete -f - +$ linkerd multicluster unlink --cluster-name=target | kubectl delete 
-f - ``` ## Uninstalling @@ -36,7 +36,7 @@ account. Before you can uninstall, you must remove all existing links as described above. Once all links have been removed, run: ```bash -linkerd multicluster uninstall | kubectl delete -f - +$ linkerd multicluster uninstall | kubectl delete -f - ``` Attempting to uninstall while at least one link remains will result in an error. diff --git a/linkerd.io/content/2.14/tasks/troubleshooting.md b/linkerd.io/content/2.14/tasks/troubleshooting.md index b142ee66a9..6dd989f3b3 100644 --- a/linkerd.io/content/2.14/tasks/troubleshooting.md +++ b/linkerd.io/content/2.14/tasks/troubleshooting.md @@ -230,7 +230,7 @@ Example failure: Ensure the Linkerd ClusterRoles exist: ```bash -kubectl get clusterroles | grep linkerd +$ kubectl get clusterroles | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -240,7 +240,7 @@ linkerd-policy 9d Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create clusterroles +$ kubectl auth can-i create clusterroles yes ``` @@ -257,7 +257,7 @@ Example failure: Ensure the Linkerd ClusterRoleBindings exist: ```bash -kubectl get clusterrolebindings | grep linkerd +$ kubectl get clusterrolebindings | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -267,7 +267,7 @@ linkerd-destination-policy 9d Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create clusterrolebindings +$ kubectl auth can-i create clusterrolebindings yes ``` @@ -284,7 +284,7 @@ Example failure: Ensure the Linkerd ServiceAccounts exist: ```bash -kubectl -n linkerd get serviceaccounts +$ kubectl -n linkerd get serviceaccounts NAME SECRETS AGE default 1 14m linkerd-destination 1 14m @@ -297,7 +297,7 @@ Also ensure you have permission to create ServiceAccounts in the Linkerd namespace: ```bash -kubectl -n linkerd auth can-i create serviceaccounts +$ kubectl -n 
linkerd auth can-i create serviceaccounts yes ``` @@ -314,7 +314,7 @@ Example failure: Ensure the Linkerd CRD exists: ```bash -kubectl get customresourcedefinitions +$ kubectl get customresourcedefinitions NAME CREATED AT serviceprofiles.linkerd.io 2019-04-25T21:47:31Z ``` @@ -322,7 +322,7 @@ serviceprofiles.linkerd.io 2019-04-25T21:47:31Z Also ensure you have permission to create CRDs: ```bash -kubectl auth can-i create customresourcedefinitions +$ kubectl auth can-i create customresourcedefinitions yes ``` @@ -339,14 +339,14 @@ Example failure: Ensure the Linkerd MutatingWebhookConfigurations exists: ```bash -kubectl get mutatingwebhookconfigurations | grep linkerd +$ kubectl get mutatingwebhookconfigurations | grep linkerd linkerd-proxy-injector-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create MutatingWebhookConfigurations: ```bash -kubectl auth can-i create mutatingwebhookconfigurations +$ kubectl auth can-i create mutatingwebhookconfigurations yes ``` @@ -363,14 +363,14 @@ Example failure: Ensure the Linkerd ValidatingWebhookConfiguration exists: ```bash -kubectl get validatingwebhookconfigurations | grep linkerd +$ kubectl get validatingwebhookconfigurations | grep linkerd linkerd-sp-validator-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create ValidatingWebhookConfigurations: ```bash -kubectl auth can-i create validatingwebhookconfigurations +$ kubectl auth can-i create validatingwebhookconfigurations yes ``` @@ -418,7 +418,7 @@ Example failure: Ensure the Linkerd ConfigMap exists: ```bash -kubectl -n linkerd get configmap/linkerd-config +$ kubectl -n linkerd get configmap/linkerd-config NAME DATA AGE linkerd-config 3 61m ``` @@ -426,7 +426,7 @@ linkerd-config 3 61m Also ensure you have permission to create ConfigMaps: ```bash -kubectl -n linkerd auth can-i create configmap +$ kubectl -n linkerd auth can-i create configmap yes ``` @@ -780,7 +780,7 @@ Example failure: Verify the state of the 
control plane pods with: ```bash -kubectl -n linkerd get po +$ kubectl -n linkerd get po NAME READY STATUS RESTARTS AGE linkerd-destination-5fd7b5d466-szgqm 2/2 Running 1 12m linkerd-identity-54df78c479-hbh5m 2/2 Running 0 12m @@ -1045,7 +1045,7 @@ Ensure the kube-system namespace has the `config.linkerd.io/admission-webhooks:disabled` label: ```bash -kubectl get namespace kube-system -oyaml +$ kubectl get namespace kube-system -oyaml kind: Namespace apiVersion: v1 metadata: @@ -1118,7 +1118,7 @@ Example error: Ensure that the linkerd-cni-config ConfigMap exists in the CNI namespace: ```bash -kubectl get cm linkerd-cni-config -n linkerd-cni +$ kubectl get cm linkerd-cni-config -n linkerd-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1126,7 +1126,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create ConfigMaps: ```bash -kubectl auth can-i create ConfigMaps +$ kubectl auth can-i create ConfigMaps yes ``` @@ -1151,7 +1151,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create ClusterRoles +$ kubectl auth can-i create ClusterRoles yes ``` @@ -1176,7 +1176,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create ClusterRoleBindings +$ kubectl auth can-i create ClusterRoleBindings yes ``` @@ -1193,7 +1193,7 @@ Example error: Ensure that the CNI service account exists in the CNI namespace: ```bash -kubectl get ServiceAccount linkerd-cni -n linkerd-cni +$ kubectl get ServiceAccount linkerd-cni -n linkerd-cni NAME SECRETS AGE linkerd-cni 1 45m ``` @@ -1201,7 +1201,7 @@ linkerd-cni 1 45m Also ensure you have permission to create ServiceAccount: ```bash -kubectl auth can-i create ServiceAccounts -n linkerd-cni +$ kubectl auth can-i create ServiceAccounts -n linkerd-cni yes ``` @@ 
-1218,7 +1218,7 @@ Example error: Ensure that the CNI daemonset exists in the CNI namespace: ```bash -kubectl get ds -n linkerd-cni +$ kubectl get ds -n linkerd-cni NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE linkerd-cni 1 1 1 1 1 beta.kubernetes.io/os=linux 14m ``` @@ -1226,7 +1226,7 @@ linkerd-cni 1 1 1 1 1 beta.kubernet Also ensure you have permission to create DaemonSets: ```bash -kubectl auth can-i create DaemonSets -n linkerd-cni +$ kubectl auth can-i create DaemonSets -n linkerd-cni yes ``` @@ -1243,7 +1243,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -kubectl get po -n linkerd-cni +$ kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s @@ -1281,7 +1281,7 @@ Make sure multicluster extension is correctly installed and that the `links.multicluster.linkerd.io` CRD is present. ```bash -kubectl get crds | grep multicluster +$ kubectl get crds | grep multicluster NAME CREATED AT links.multicluster.linkerd.io 2021-03-10T09:58:10Z ``` @@ -1360,7 +1360,7 @@ the rules section. Expected rules for `linkerd-service-mirror-access-local-resources` cluster role: ```bash -kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml +$ kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml kind: ClusterRole metadata: labels: @@ -1426,7 +1426,7 @@ everything to start up. 
If this is a permanent error, you'll want to validate the state of the controller pod with: ```bash -kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror +$ kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror NAME READY STATUS RESTARTS AGE linkerd-service-mirror-7bb8ff5967-zg265 2/2 Running 0 50m ``` @@ -1544,7 +1544,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoles exist: ```bash -kubectl get clusterroles | grep linkerd-viz +$ kubectl get clusterroles | grep linkerd-viz linkerd-linkerd-viz-metrics-api 2021-01-26T18:02:17Z linkerd-linkerd-viz-prometheus 2021-01-26T18:02:17Z linkerd-linkerd-viz-tap 2021-01-26T18:02:17Z @@ -1572,7 +1572,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoleBindings exist: ```bash -kubectl get clusterrolebindings | grep linkerd-viz +$ kubectl get clusterrolebindings | grep linkerd-viz linkerd-linkerd-viz-metrics-api ClusterRole/linkerd-linkerd-viz-metrics-api 18h linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 18h linkerd-linkerd-viz-tap ClusterRole/linkerd-linkerd-viz-tap 18h @@ -1584,7 +1584,7 @@ linkerd-linkerd-viz-web-check ClusterRole/linkerd-linke Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create clusterrolebindings +$ kubectl auth can-i create clusterrolebindings yes ``` @@ -1673,7 +1673,7 @@ requirements in the cluster: Ensure all the linkerd-viz pods are injected ```bash -kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1697,7 +1697,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-viz pods are running with 2/2 ```bash -kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 
2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1721,12 +1721,12 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the prometheus related resources are present and running correctly. ```bash -❯ kubectl -n linkerd-viz get deploy,cm | grep prometheus +$ kubectl -n linkerd-viz get deploy,cm | grep prometheus deployment.apps/prometheus 1/1 1 1 3m18s configmap/prometheus-config 1 3m18s -❯ kubectl get clusterRoleBindings | grep prometheus +$ kubectl get clusterRoleBindings | grep prometheus linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 3m37s -❯ kubectl get clusterRoles | grep prometheus +$ kubectl get clusterRoles | grep prometheus linkerd-linkerd-viz-prometheus 2021-02-26T06:03:11Zh ``` @@ -1742,7 +1742,7 @@ Example failure: Verify that the metrics API pod is running correctly ```bash -❯ kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE metrics-api-7bb8cb8489-cbq4m 2/2 Running 0 4m58s tap-injector-6b9bc6fc4-cgbr4 2/2 Running 0 4m56s @@ -1880,7 +1880,7 @@ versions in sync by updating either the CLI or linkerd-jaeger as necessary. 
Ensure all the jaeger pods are injected ```bash -kubectl -n linkerd-jaeger get pods +$ kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE collector-69cc44dfbc-rhpfg 2/2 Running 0 11s jaeger-6f98d5c979-scqlq 2/2 Running 0 11s @@ -1901,7 +1901,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-jaeger pods are running with 2/2 ```bash -kubectl -n linkerd-jaeger get pods +$ kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE jaeger-injector-548684d74b-bcq5h 2/2 Running 0 5s collector-69cc44dfbc-wqf6s 2/2 Running 0 5s @@ -2015,7 +2015,7 @@ linkerd-buoyant install | kubectl apply -f - Ensure that the cluster role exists: ```bash -kubectl get clusterrole buoyant-cloud-agent +$ kubectl get clusterrole buoyant-cloud-agent NAME CREATED AT buoyant-cloud-agent 2020-11-13T00:59:50Z ``` @@ -2023,7 +2023,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create ClusterRoles +$ kubectl auth can-i create clusterroles yes ``` @@ -2038,7 +2038,7 @@ yes Ensure that the cluster role binding exists: ```bash -kubectl get clusterrolebinding buoyant-cloud-agent +$ kubectl get clusterrolebinding buoyant-cloud-agent NAME ROLE AGE buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d ``` @@ -2046,7 +2046,7 @@ buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create ClusterRoleBindings +$ kubectl auth can-i create ClusterRoleBindings yes ``` @@ -2061,7 +2061,7 @@ yes Ensure that the service account exists: ```bash -kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent +$ kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent NAME SECRETS AGE buoyant-cloud-agent 1 301d ``` @@ -2069,7 +2069,7 @@ buoyant-cloud-agent 1 301d Also ensure you have permission to create ServiceAccounts: ```bash -kubectl -n buoyant-cloud auth can-i 
create ServiceAccount +$ kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2084,7 +2084,7 @@ yes Ensure that the secret exists: ```bash -kubectl -n buoyant-cloud get secret buoyant-cloud-id +$ kubectl -n buoyant-cloud get secret buoyant-cloud-id NAME TYPE DATA AGE buoyant-cloud-id Opaque 4 301d ``` @@ -2092,7 +2092,7 @@ buoyant-cloud-id Opaque 4 301d Also ensure you have permission to create ServiceAccounts: ```bash -kubectl -n buoyant-cloud auth can-i create ServiceAccount +$ kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2114,7 +2114,7 @@ If the Deployment does not exist, the `linkerd-buoyant` installation may be missing or incomplete. To reinstall the extension: ```bash -linkerd-buoyant install | kubectl apply -f - +$ linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running @@ -2130,7 +2130,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-agent` Deployment with: ```bash -kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent +$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 156m ``` @@ -2153,7 +2153,7 @@ Ensure the `buoyant-cloud-agent` pod is injected, the `READY` column should show `2/2`: ```bash -kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent +$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 161m ``` @@ -2172,7 +2172,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -linkerd-buoyant version +$ linkerd-buoyant version CLI version: v0.4.4 Agent version: v0.4.4 ``` @@ -2231,7 +2231,7 @@ everything to start up. 
If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-metrics` DaemonSet with: ```bash -kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 163m buoyant-cloud-metrics-q8jhj 2/2 Running 0 163m @@ -2257,7 +2257,7 @@ Ensure the `buoyant-cloud-metrics` pods are injected, the `READY` column should show `2/2`: ```bash -kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 166m buoyant-cloud-metrics-q8jhj 2/2 Running 0 166m @@ -2279,7 +2279,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' +$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' {"app.kubernetes.io/name":"metrics","app.kubernetes.io/part-of":"buoyant-cloud","app.kubernetes.io/version":"v0.4.4"} ``` diff --git a/linkerd.io/content/2.15/tasks/troubleshooting.md b/linkerd.io/content/2.15/tasks/troubleshooting.md index 8b1d054813..c5dd2f3e0a 100644 --- a/linkerd.io/content/2.15/tasks/troubleshooting.md +++ b/linkerd.io/content/2.15/tasks/troubleshooting.md @@ -132,9 +132,9 @@ For more information see these pages in the Kubernetes Documentation: Also verify that these command works: ```bash -$ kubectl config view -$ kubectl cluster-info -$ kubectl version +kubectl config view +kubectl cluster-info +kubectl version ``` Another example failure: @@ -230,7 +230,7 @@ Example failure: Ensure the Linkerd ClusterRoles exist: ```bash -kubectl get clusterroles | grep linkerd +$ kubectl get clusterroles | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d 
linkerd-linkerd-proxy-injector 9d @@ -240,7 +240,7 @@ linkerd-policy 9d Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create clusterroles +$ kubectl auth can-i create clusterroles yes ``` @@ -257,7 +257,7 @@ Example failure: Ensure the Linkerd ClusterRoleBindings exist: ```bash -kubectl get clusterrolebindings | grep linkerd +$ kubectl get clusterrolebindings | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -267,7 +267,7 @@ linkerd-destination-policy 9d Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create clusterrolebindings +$ kubectl auth can-i create clusterrolebindings yes ``` @@ -284,7 +284,7 @@ Example failure: Ensure the Linkerd ServiceAccounts exist: ```bash -kubectl -n linkerd get serviceaccounts +$ kubectl -n linkerd get serviceaccounts NAME SECRETS AGE default 1 14m linkerd-destination 1 14m @@ -297,7 +297,7 @@ Also ensure you have permission to create ServiceAccounts in the Linkerd namespace: ```bash -kubectl -n linkerd auth can-i create serviceaccounts +$ kubectl -n linkerd auth can-i create serviceaccounts yes ``` @@ -314,7 +314,7 @@ Example failure: Ensure the Linkerd CRD exists: ```bash -kubectl get customresourcedefinitions +$ kubectl get customresourcedefinitions NAME CREATED AT serviceprofiles.linkerd.io 2019-04-25T21:47:31Z ``` @@ -322,7 +322,7 @@ serviceprofiles.linkerd.io 2019-04-25T21:47:31Z Also ensure you have permission to create CRDs: ```bash -kubectl auth can-i create customresourcedefinitions +$ kubectl auth can-i create customresourcedefinitions yes ``` @@ -339,14 +339,14 @@ Example failure: Ensure the Linkerd MutatingWebhookConfigurations exists: ```bash -kubectl get mutatingwebhookconfigurations | grep linkerd +$ kubectl get mutatingwebhookconfigurations | grep linkerd linkerd-proxy-injector-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create 
MutatingWebhookConfigurations: ```bash -kubectl auth can-i create mutatingwebhookconfigurations +$ kubectl auth can-i create mutatingwebhookconfigurations yes ``` @@ -363,14 +363,14 @@ Example failure: Ensure the Linkerd ValidatingWebhookConfiguration exists: ```bash -kubectl get validatingwebhookconfigurations | grep linkerd +$ kubectl get validatingwebhookconfigurations | grep linkerd linkerd-sp-validator-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create ValidatingWebhookConfigurations: ```bash -kubectl auth can-i create validatingwebhookconfigurations +$ kubectl auth can-i create validatingwebhookconfigurations yes ``` @@ -418,7 +418,7 @@ Example failure: Ensure the Linkerd ConfigMap exists: ```bash -kubectl -n linkerd get configmap/linkerd-config +$ kubectl -n linkerd get configmap/linkerd-config NAME DATA AGE linkerd-config 3 61m ``` @@ -426,7 +426,7 @@ linkerd-config 3 61m Also ensure you have permission to create ConfigMaps: ```bash -kubectl -n linkerd auth can-i create configmap +$ kubectl -n linkerd auth can-i create configmap yes ``` @@ -780,7 +780,7 @@ Example failure: Verify the state of the control plane pods with: ```bash -kubectl -n linkerd get po +$ kubectl -n linkerd get po NAME READY STATUS RESTARTS AGE linkerd-destination-5fd7b5d466-szgqm 2/2 Running 1 12m linkerd-identity-54df78c479-hbh5m 2/2 Running 0 12m @@ -1296,7 +1296,7 @@ Make sure multicluster extension is correctly installed and that the `links.multicluster.linkerd.io` CRD is present. ```bash -kubectl get crds | grep multicluster +$ kubectl get crds | grep multicluster NAME CREATED AT links.multicluster.linkerd.io 2021-03-10T09:58:10Z ``` @@ -1375,7 +1375,7 @@ the rules section. 
Expected rules for `linkerd-service-mirror-access-local-resources` cluster role: ```bash -kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml +$ kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml kind: ClusterRole metadata: labels: @@ -1441,7 +1441,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the controller pod with: ```bash -kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror +$ kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror NAME READY STATUS RESTARTS AGE linkerd-service-mirror-7bb8ff5967-zg265 2/2 Running 0 50m ``` @@ -1559,7 +1559,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoles exist: ```bash -kubectl get clusterroles | grep linkerd-viz +$ kubectl get clusterroles | grep linkerd-viz linkerd-linkerd-viz-metrics-api 2021-01-26T18:02:17Z linkerd-linkerd-viz-prometheus 2021-01-26T18:02:17Z linkerd-linkerd-viz-tap 2021-01-26T18:02:17Z @@ -1587,7 +1587,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoleBindings exist: ```bash -kubectl get clusterrolebindings | grep linkerd-viz +$ kubectl get clusterrolebindings | grep linkerd-viz linkerd-linkerd-viz-metrics-api ClusterRole/linkerd-linkerd-viz-metrics-api 18h linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 18h linkerd-linkerd-viz-tap ClusterRole/linkerd-linkerd-viz-tap 18h @@ -1599,7 +1599,7 @@ linkerd-linkerd-viz-web-check ClusterRole/linkerd-linke Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create clusterrolebindings +$ kubectl auth can-i create clusterrolebindings yes ``` @@ -1688,7 +1688,7 @@ requirements in the cluster: Ensure all the linkerd-viz pods are injected ```bash -kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE 
grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1712,7 +1712,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-viz pods are running with 2/2 ```bash -kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1757,7 +1757,7 @@ Example failure: Verify that the metrics API pod is running correctly ```bash -❯ kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE metrics-api-7bb8cb8489-cbq4m 2/2 Running 0 4m58s tap-injector-6b9bc6fc4-cgbr4 2/2 Running 0 4m56s @@ -1895,7 +1895,7 @@ versions in sync by updating either the CLI or linkerd-jaeger as necessary. Ensure all the jaeger pods are injected ```bash -kubectl -n linkerd-jaeger get pods +$ kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE collector-69cc44dfbc-rhpfg 2/2 Running 0 11s jaeger-6f98d5c979-scqlq 2/2 Running 0 11s @@ -2030,7 +2030,7 @@ linkerd-buoyant install | kubectl apply -f - Ensure that the cluster role exists: ```bash -kubectl get clusterrole buoyant-cloud-agent +$ kubectl get clusterrole buoyant-cloud-agent NAME CREATED AT buoyant-cloud-agent 2020-11-13T00:59:50Z ``` @@ -2038,7 +2038,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create ClusterRoles +$ kubectl auth can-i create clusterroles yes ``` @@ -2053,7 +2053,7 @@ yes Ensure that the cluster role binding exists: ```bash -kubectl get clusterrolebinding buoyant-cloud-agent +$ kubectl get clusterrolebinding buoyant-cloud-agent NAME ROLE AGE buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d ``` @@ -2061,7 +2061,7 @@ buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create 
ClusterRoleBindings +$ kubectl auth can-i create ClusterRoleBindings yes ``` @@ -2076,7 +2076,7 @@ yes Ensure that the service account exists: ```bash -kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent +$ kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent NAME SECRETS AGE buoyant-cloud-agent 1 301d ``` @@ -2084,7 +2084,7 @@ buoyant-cloud-agent 1 301d Also ensure you have permission to create ServiceAccounts: ```bash -kubectl -n buoyant-cloud auth can-i create ServiceAccount +$ kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2099,7 +2099,7 @@ yes Ensure that the secret exists: ```bash -kubectl -n buoyant-cloud get secret buoyant-cloud-id +$ kubectl -n buoyant-cloud get secret buoyant-cloud-id NAME TYPE DATA AGE buoyant-cloud-id Opaque 4 301d ``` @@ -2107,7 +2107,7 @@ buoyant-cloud-id Opaque 4 301d Also ensure you have permission to create ServiceAccounts: ```bash -kubectl -n buoyant-cloud auth can-i create ServiceAccount +$ kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2145,7 +2145,7 @@ everything to start up. 
If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-agent` Deployment with: ```bash -kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent +$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 156m ``` @@ -2168,7 +2168,7 @@ Ensure the `buoyant-cloud-agent` pod is injected, the `READY` column should show `2/2`: ```bash -kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent +$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 161m ``` @@ -2187,7 +2187,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -linkerd-buoyant version +$ linkerd-buoyant version CLI version: v0.4.4 Agent version: v0.4.4 ``` @@ -2246,7 +2246,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-metrics` DaemonSet with: ```bash -kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 163m buoyant-cloud-metrics-q8jhj 2/2 Running 0 163m @@ -2294,7 +2294,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' +$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' {"app.kubernetes.io/name":"metrics","app.kubernetes.io/part-of":"buoyant-cloud","app.kubernetes.io/version":"v0.4.4"} ``` diff --git a/linkerd.io/content/2.16/tasks/troubleshooting.md b/linkerd.io/content/2.16/tasks/troubleshooting.md index 2c57453aa6..a0e5768590 100644 --- a/linkerd.io/content/2.16/tasks/troubleshooting.md +++ 
b/linkerd.io/content/2.16/tasks/troubleshooting.md @@ -230,7 +230,7 @@ Example failure: Ensure the Linkerd ClusterRoles exist: ```bash -kubectl get clusterroles | grep linkerd +$ kubectl get clusterroles | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -240,7 +240,7 @@ linkerd-policy 9d Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create clusterroles +$ kubectl auth can-i create clusterroles yes ``` @@ -257,7 +257,7 @@ Example failure: Ensure the Linkerd ClusterRoleBindings exist: ```bash -kubectl get clusterrolebindings | grep linkerd +$ kubectl get clusterrolebindings | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -267,7 +267,7 @@ linkerd-destination-policy 9d Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create clusterrolebindings +$ kubectl auth can-i create clusterrolebindings yes ``` @@ -284,7 +284,7 @@ Example failure: Ensure the Linkerd ServiceAccounts exist: ```bash -kubectl -n linkerd get serviceaccounts +$ kubectl -n linkerd get serviceaccounts NAME SECRETS AGE default 1 14m linkerd-destination 1 14m @@ -297,7 +297,7 @@ Also ensure you have permission to create ServiceAccounts in the Linkerd namespace: ```bash -kubectl -n linkerd auth can-i create serviceaccounts +$ kubectl -n linkerd auth can-i create serviceaccounts yes ``` @@ -314,7 +314,7 @@ Example failure: Ensure the Linkerd CRD exists: ```bash -kubectl get customresourcedefinitions +$ kubectl get customresourcedefinitions NAME CREATED AT serviceprofiles.linkerd.io 2019-04-25T21:47:31Z ``` @@ -322,7 +322,7 @@ serviceprofiles.linkerd.io 2019-04-25T21:47:31Z Also ensure you have permission to create CRDs: ```bash -kubectl auth can-i create customresourcedefinitions +$ kubectl auth can-i create customresourcedefinitions yes ``` @@ -339,14 +339,14 @@ Example failure: Ensure the Linkerd 
MutatingWebhookConfigurations exists: ```bash -kubectl get mutatingwebhookconfigurations | grep linkerd +$ kubectl get mutatingwebhookconfigurations | grep linkerd linkerd-proxy-injector-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create MutatingWebhookConfigurations: ```bash -kubectl auth can-i create mutatingwebhookconfigurations +$ kubectl auth can-i create mutatingwebhookconfigurations yes ``` @@ -363,14 +363,14 @@ Example failure: Ensure the Linkerd ValidatingWebhookConfiguration exists: ```bash -kubectl get validatingwebhookconfigurations | grep linkerd +$ kubectl get validatingwebhookconfigurations | grep linkerd linkerd-sp-validator-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create ValidatingWebhookConfigurations: ```bash -kubectl auth can-i create validatingwebhookconfigurations +$ kubectl auth can-i create validatingwebhookconfigurations yes ``` @@ -418,7 +418,7 @@ Example failure: Ensure the Linkerd ConfigMap exists: ```bash -kubectl -n linkerd get configmap/linkerd-config +$ kubectl -n linkerd get configmap/linkerd-config NAME DATA AGE linkerd-config 3 61m ``` @@ -426,7 +426,7 @@ linkerd-config 3 61m Also ensure you have permission to create ConfigMaps: ```bash -kubectl -n linkerd auth can-i create configmap +$ kubectl -n linkerd auth can-i create configmap yes ``` @@ -780,7 +780,7 @@ Example failure: Verify the state of the control plane pods with: ```bash -kubectl -n linkerd get po +$ kubectl -n linkerd get po NAME READY STATUS RESTARTS AGE linkerd-destination-5fd7b5d466-szgqm 2/2 Running 1 12m linkerd-identity-54df78c479-hbh5m 2/2 Running 0 12m @@ -1296,7 +1296,7 @@ Make sure multicluster extension is correctly installed and that the `links.multicluster.linkerd.io` CRD is present. ```bash -kubectl get crds | grep multicluster +$ kubectl get crds | grep multicluster NAME CREATED AT links.multicluster.linkerd.io 2021-03-10T09:58:10Z ``` @@ -1375,7 +1375,7 @@ the rules section. 
Expected rules for `linkerd-service-mirror-access-local-resources` cluster role: ```bash -kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml +$ kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml kind: ClusterRole metadata: labels: @@ -1441,7 +1441,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the controller pod with: ```bash -kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror +$ kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror NAME READY STATUS RESTARTS AGE linkerd-service-mirror-7bb8ff5967-zg265 2/2 Running 0 50m ``` @@ -1559,7 +1559,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoles exist: ```bash -kubectl get clusterroles | grep linkerd-viz +$ kubectl get clusterroles | grep linkerd-viz linkerd-linkerd-viz-metrics-api 2021-01-26T18:02:17Z linkerd-linkerd-viz-prometheus 2021-01-26T18:02:17Z linkerd-linkerd-viz-tap 2021-01-26T18:02:17Z @@ -1570,7 +1570,7 @@ linkerd-linkerd-viz-web-check 2021-01-2 Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create clusterroles +$ kubectl auth can-i create clusterroles yes ``` @@ -1587,7 +1587,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoleBindings exist: ```bash -kubectl get clusterrolebindings | grep linkerd-viz +$ kubectl get clusterrolebindings | grep linkerd-viz linkerd-linkerd-viz-metrics-api ClusterRole/linkerd-linkerd-viz-metrics-api 18h linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 18h linkerd-linkerd-viz-tap ClusterRole/linkerd-linkerd-viz-tap 18h @@ -1599,7 +1599,7 @@ linkerd-linkerd-viz-web-check ClusterRole/linkerd-linke Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create clusterrolebindings +$ kubectl auth can-i create clusterrolebindings 
yes ``` @@ -1688,7 +1688,7 @@ requirements in the cluster: Ensure all the linkerd-viz pods are injected ```bash -kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1712,7 +1712,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-viz pods are running with 2/2 ```bash -kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1736,12 +1736,12 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the prometheus related resources are present and running correctly. ```bash -❯ kubectl -n linkerd-viz get deploy,cm | grep prometheus +$ kubectl -n linkerd-viz get deploy,cm | grep prometheus deployment.apps/prometheus 1/1 1 1 3m18s configmap/prometheus-config 1 3m18s -❯ kubectl get clusterRoleBindings | grep prometheus +$ kubectl get clusterRoleBindings | grep prometheus linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 3m37s -❯ kubectl get clusterRoles | grep prometheus +$ kubectl get clusterRoles | grep prometheus linkerd-linkerd-viz-prometheus 2021-02-26T06:03:11Zh ``` @@ -1757,7 +1757,7 @@ Example failure: Verify that the metrics API pod is running correctly ```bash -❯ kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE metrics-api-7bb8cb8489-cbq4m 2/2 Running 0 4m58s tap-injector-6b9bc6fc4-cgbr4 2/2 Running 0 4m56s @@ -1895,7 +1895,7 @@ versions in sync by updating either the CLI or linkerd-jaeger as necessary. 
Ensure all the jaeger pods are injected ```bash -kubectl -n linkerd-jaeger get pods +$ kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE collector-69cc44dfbc-rhpfg 2/2 Running 0 11s jaeger-6f98d5c979-scqlq 2/2 Running 0 11s @@ -1916,7 +1916,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-jaeger pods are running with 2/2 ```bash -kubectl -n linkerd-jaeger get pods +$ kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE jaeger-injector-548684d74b-bcq5h 2/2 Running 0 5s collector-69cc44dfbc-wqf6s 2/2 Running 0 5s @@ -1965,7 +1965,7 @@ Ensure you can connect to the Linkerd Buoyant version check endpoint from the environment the `linkerd` cli is running: ```bash -curl https://buoyant.cloud/version.json +$ curl https://buoyant.cloud/version.json {"linkerd-buoyant":"v0.4.4"} ``` @@ -2030,7 +2030,7 @@ linkerd-buoyant install | kubectl apply -f - Ensure that the cluster role exists: ```bash -kubectl get clusterrole buoyant-cloud-agent +$ kubectl get clusterrole buoyant-cloud-agent NAME CREATED AT buoyant-cloud-agent 2020-11-13T00:59:50Z ``` @@ -2038,7 +2038,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create ClusterRoles +$ kubectl auth can-i create clusterroles yes ``` @@ -2053,7 +2053,7 @@ yes Ensure that the cluster role binding exists: ```bash -kubectl get clusterrolebinding buoyant-cloud-agent +$ kubectl get clusterrolebinding buoyant-cloud-agent NAME ROLE AGE buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d ``` @@ -2061,7 +2061,7 @@ buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create ClusterRoleBindings +$ kubectl auth can-i create clusterrolebindings yes ``` @@ -2076,7 +2076,7 @@ yes Ensure that the service account exists: ```bash -kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent +$
kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent NAME SECRETS AGE buoyant-cloud-agent 1 301d ``` @@ -2084,7 +2084,7 @@ buoyant-cloud-agent 1 301d Also ensure you have permission to create ServiceAccounts: ```bash -kubectl -n buoyant-cloud auth can-i create ServiceAccount +$ kubectl -n buoyant-cloud auth can-i create serviceaccounts yes ``` @@ -2099,7 +2099,7 @@ yes Ensure that the secret exists: ```bash -kubectl -n buoyant-cloud get secret buoyant-cloud-id +$ kubectl -n buoyant-cloud get secret buoyant-cloud-id NAME TYPE DATA AGE buoyant-cloud-id Opaque 4 301d ``` @@ -2107,7 +2107,7 @@ buoyant-cloud-id Opaque 4 301d Also ensure you have permission to create ServiceAccounts: ```bash -kubectl -n buoyant-cloud auth can-i create ServiceAccount +$ kubectl -n buoyant-cloud auth can-i create serviceaccounts yes ``` @@ -2122,14 +2122,14 @@ yes Ensure the `buoyant-cloud-agent` Deployment exists: ```bash -kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent +$ kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent ``` If the Deployment does not exist, the `linkerd-buoyant` installation may be missing or incomplete. To reinstall the extension: ```bash -linkerd-buoyant install | kubectl apply -f - +$ linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running @@ -2145,7 +2145,7 @@ everything to start up.
If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-agent` Deployment with: ```bash -kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent +$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 156m ``` @@ -2168,7 +2168,7 @@ Ensure the `buoyant-cloud-agent` pod is injected, the `READY` column should show `2/2`: ```bash -kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent +$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 161m ``` @@ -2187,7 +2187,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -linkerd-buoyant version +$ linkerd-buoyant version CLI version: v0.4.4 Agent version: v0.4.4 ``` @@ -2195,7 +2195,7 @@ Agent version: v0.4.4 To update to the latest version: ```bash -linkerd-buoyant install | kubectl apply -f - +$ linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running a single pod @@ -2209,7 +2209,7 @@ linkerd-buoyant install | kubectl apply -f - `buoyant-cloud-agent` should run as a singleton. Check for other pods: ```bash -kubectl get po -A --selector app=buoyant-cloud-agent +$ kubectl get po -A --selector app=buoyant-cloud-agent ``` ### √ buoyant-cloud-metrics DaemonSet exists @@ -2223,14 +2223,14 @@ kubectl get po -A --selector app=buoyant-cloud-agent Ensure the `buoyant-cloud-metrics` DaemonSet exists: ```bash -kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics ``` If the DaemonSet does not exist, the `linkerd-buoyant` installation may be missing or incomplete. 
To reinstall the extension: ```bash -linkerd-buoyant install | kubectl apply -f - +$ linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-metrics DaemonSet is running @@ -2246,7 +2246,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-metrics` DaemonSet with: ```bash -kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 163m buoyant-cloud-metrics-q8jhj 2/2 Running 0 163m @@ -2272,7 +2272,7 @@ Ensure the `buoyant-cloud-metrics` pods are injected, the `READY` column should show `2/2`: ```bash -kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 166m buoyant-cloud-metrics-q8jhj 2/2 Running 0 166m @@ -2294,7 +2294,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' +$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' {"app.kubernetes.io/name":"metrics","app.kubernetes.io/part-of":"buoyant-cloud","app.kubernetes.io/version":"v0.4.4"} ``` diff --git a/linkerd.io/content/2.17/tasks/troubleshooting.md b/linkerd.io/content/2.17/tasks/troubleshooting.md index 79bacd3f7b..7278f40c7b 100644 --- a/linkerd.io/content/2.17/tasks/troubleshooting.md +++ b/linkerd.io/content/2.17/tasks/troubleshooting.md @@ -230,7 +230,7 @@ Example failure: Ensure the Linkerd ClusterRoles exist: ```bash -kubectl get clusterroles | grep linkerd +$ kubectl get clusterroles | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -240,7 +240,7 @@ linkerd-policy 
9d Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create clusterroles +$ kubectl auth can-i create clusterroles yes ``` @@ -257,7 +257,7 @@ Example failure: Ensure the Linkerd ClusterRoleBindings exist: ```bash -kubectl get clusterrolebindings | grep linkerd +$ kubectl get clusterrolebindings | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -267,7 +267,7 @@ linkerd-destination-policy 9d Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create clusterrolebindings +$ kubectl auth can-i create clusterrolebindings yes ``` @@ -284,7 +284,7 @@ Example failure: Ensure the Linkerd ServiceAccounts exist: ```bash -kubectl -n linkerd get serviceaccounts +$ kubectl -n linkerd get serviceaccounts NAME SECRETS AGE default 1 14m linkerd-destination 1 14m @@ -297,7 +297,7 @@ Also ensure you have permission to create ServiceAccounts in the Linkerd namespace: ```bash -kubectl -n linkerd auth can-i create serviceaccounts +$ kubectl -n linkerd auth can-i create serviceaccounts yes ``` @@ -314,7 +314,7 @@ Example failure: Ensure the Linkerd CRD exists: ```bash -kubectl get customresourcedefinitions +$ kubectl get customresourcedefinitions NAME CREATED AT serviceprofiles.linkerd.io 2019-04-25T21:47:31Z ``` @@ -322,7 +322,7 @@ serviceprofiles.linkerd.io 2019-04-25T21:47:31Z Also ensure you have permission to create CRDs: ```bash -kubectl auth can-i create customresourcedefinitions +$ kubectl auth can-i create customresourcedefinitions yes ``` @@ -339,14 +339,14 @@ Example failure: Ensure the Linkerd MutatingWebhookConfigurations exists: ```bash -kubectl get mutatingwebhookconfigurations | grep linkerd +$ kubectl get mutatingwebhookconfigurations | grep linkerd linkerd-proxy-injector-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create MutatingWebhookConfigurations: ```bash -kubectl auth can-i create 
mutatingwebhookconfigurations +$ kubectl auth can-i create mutatingwebhookconfigurations yes ``` @@ -363,14 +363,14 @@ Example failure: Ensure the Linkerd ValidatingWebhookConfiguration exists: ```bash -kubectl get validatingwebhookconfigurations | grep linkerd +$ kubectl get validatingwebhookconfigurations | grep linkerd linkerd-sp-validator-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create ValidatingWebhookConfigurations: ```bash -kubectl auth can-i create validatingwebhookconfigurations +$ kubectl auth can-i create validatingwebhookconfigurations yes ``` @@ -418,7 +418,7 @@ Example failure: Ensure the Linkerd ConfigMap exists: ```bash -kubectl -n linkerd get configmap/linkerd-config +$ kubectl -n linkerd get configmap/linkerd-config NAME DATA AGE linkerd-config 3 61m ``` @@ -426,7 +426,7 @@ linkerd-config 3 61m Also ensure you have permission to create ConfigMaps: ```bash -kubectl -n linkerd auth can-i create configmap +$ kubectl -n linkerd auth can-i create configmap yes ``` @@ -780,7 +780,7 @@ Example failure: Verify the state of the control plane pods with: ```bash -kubectl -n linkerd get po +$ kubectl -n linkerd get po NAME READY STATUS RESTARTS AGE linkerd-destination-5fd7b5d466-szgqm 2/2 Running 1 12m linkerd-identity-54df78c479-hbh5m 2/2 Running 0 12m @@ -1310,7 +1310,7 @@ Make sure multicluster extension is correctly installed and that the `links.multicluster.linkerd.io` CRD is present. ```bash -kubectl get crds | grep multicluster +$ kubectl get crds | grep multicluster NAME CREATED AT links.multicluster.linkerd.io 2021-03-10T09:58:10Z ``` @@ -1400,7 +1400,7 @@ the rules section. 
Expected rules for `linkerd-service-mirror-access-local-resources` cluster role: ```bash -kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml +$ kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml kind: ClusterRole metadata: labels: @@ -1466,7 +1466,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the controller pod with: ```bash -kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror +$ kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror NAME READY STATUS RESTARTS AGE linkerd-service-mirror-7bb8ff5967-zg265 2/2 Running 0 50m ``` @@ -1584,7 +1584,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoles exist: ```bash -kubectl get clusterroles | grep linkerd-viz +$ kubectl get clusterroles | grep linkerd-viz linkerd-linkerd-viz-metrics-api 2021-01-26T18:02:17Z linkerd-linkerd-viz-prometheus 2021-01-26T18:02:17Z linkerd-linkerd-viz-tap 2021-01-26T18:02:17Z @@ -1612,7 +1612,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoleBindings exist: ```bash -kubectl get clusterrolebindings | grep linkerd-viz +$ kubectl get clusterrolebindings | grep linkerd-viz linkerd-linkerd-viz-metrics-api ClusterRole/linkerd-linkerd-viz-metrics-api 18h linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 18h linkerd-linkerd-viz-tap ClusterRole/linkerd-linkerd-viz-tap 18h @@ -1624,7 +1624,7 @@ linkerd-linkerd-viz-web-check ClusterRole/linkerd-linke Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create clusterrolebindings +$ kubectl auth can-i create clusterrolebindings yes ``` @@ -1713,7 +1713,7 @@ requirements in the cluster: Ensure all the linkerd-viz pods are injected ```bash -kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE 
grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1737,7 +1737,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-viz pods are running with 2/2 ```bash -kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1761,12 +1761,12 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the prometheus related resources are present and running correctly. ```bash -❯ kubectl -n linkerd-viz get deploy,cm | grep prometheus +$ kubectl -n linkerd-viz get deploy,cm | grep prometheus deployment.apps/prometheus 1/1 1 1 3m18s configmap/prometheus-config 1 3m18s -❯ kubectl get clusterRoleBindings | grep prometheus +$ kubectl get clusterRoleBindings | grep prometheus linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 3m37s -❯ kubectl get clusterRoles | grep prometheus +$ kubectl get clusterRoles | grep prometheus linkerd-linkerd-viz-prometheus 2021-02-26T06:03:11Zh ``` @@ -1782,7 +1782,7 @@ Example failure: Verify that the metrics API pod is running correctly ```bash -❯ kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE metrics-api-7bb8cb8489-cbq4m 2/2 Running 0 4m58s tap-injector-6b9bc6fc4-cgbr4 2/2 Running 0 4m56s @@ -1920,7 +1920,7 @@ versions in sync by updating either the CLI or linkerd-jaeger as necessary. 
Ensure all the jaeger pods are injected ```bash -kubectl -n linkerd-jaeger get pods +$ kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE collector-69cc44dfbc-rhpfg 2/2 Running 0 11s jaeger-6f98d5c979-scqlq 2/2 Running 0 11s @@ -1990,7 +1990,7 @@ Ensure you can connect to the Linkerd Buoyant version check endpoint from the environment the `linkerd` cli is running: ```bash -curl https://buoyant.cloud/version.json +$ curl https://buoyant.cloud/version.json {"linkerd-buoyant":"v0.4.4"} ``` @@ -2055,7 +2055,7 @@ linkerd-buoyant install | kubectl apply -f - Ensure that the cluster role exists: ```bash -kubectl get clusterrole buoyant-cloud-agent +$ kubectl get clusterrole buoyant-cloud-agent NAME CREATED AT buoyant-cloud-agent 2020-11-13T00:59:50Z ``` @@ -2063,7 +2063,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create ClusterRoles +$ kubectl auth can-i create clusterroles yes ``` @@ -2078,7 +2078,7 @@ yes Ensure that the cluster role binding exists: ```bash -kubectl get clusterrolebinding buoyant-cloud-agent +$ kubectl get clusterrolebinding buoyant-cloud-agent NAME ROLE AGE buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d ``` @@ -2086,7 +2086,7 @@ buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create ClusterRoleBindings +$ kubectl auth can-i create clusterrolebindings yes ``` @@ -2101,7 +2101,7 @@ yes Ensure that the service account exists: ```bash -kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent +$ kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent NAME SECRETS AGE buoyant-cloud-agent 1 301d ``` @@ -2109,7 +2109,7 @@ buoyant-cloud-agent 1 301d Also ensure you have permission to create ServiceAccounts: ```bash -kubectl -n buoyant-cloud auth can-i create ServiceAccount +$ kubectl -n buoyant-cloud auth can-i create serviceaccounts yes
``` @@ -2124,7 +2124,7 @@ yes Ensure that the secret exists: ```bash -kubectl -n buoyant-cloud get secret buoyant-cloud-id +$ kubectl -n buoyant-cloud get secret buoyant-cloud-id NAME TYPE DATA AGE buoyant-cloud-id Opaque 4 301d ``` @@ -2132,7 +2132,7 @@ buoyant-cloud-id Opaque 4 301d Also ensure you have permission to create ServiceAccounts: ```bash -kubectl -n buoyant-cloud auth can-i create ServiceAccount +$ kubectl -n buoyant-cloud auth can-i create serviceaccounts yes ``` @@ -2147,14 +2147,14 @@ yes Ensure the `buoyant-cloud-agent` Deployment exists: ```bash -kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent +$ kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent ``` If the Deployment does not exist, the `linkerd-buoyant` installation may be missing or incomplete. To reinstall the extension: ```bash -linkerd-buoyant install | kubectl apply -f - +$ linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running @@ -2170,7 +2170,7 @@ everything to start up.
If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-agent` Deployment with: ```bash -kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent +$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 156m ``` @@ -2193,7 +2193,7 @@ Ensure the `buoyant-cloud-agent` pod is injected, the `READY` column should show `2/2`: ```bash -kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent +$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 161m ``` @@ -2212,7 +2212,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -linkerd-buoyant version +$ linkerd-buoyant version CLI version: v0.4.4 Agent version: v0.4.4 ``` @@ -2220,7 +2220,7 @@ Agent version: v0.4.4 To update to the latest version: ```bash -linkerd-buoyant install | kubectl apply -f - +$ linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running a single pod @@ -2234,7 +2234,7 @@ linkerd-buoyant install | kubectl apply -f - `buoyant-cloud-agent` should run as a singleton. Check for other pods: ```bash -kubectl get po -A --selector app=buoyant-cloud-agent +$ kubectl get po -A --selector app=buoyant-cloud-agent ``` ### √ buoyant-cloud-metrics DaemonSet exists @@ -2248,14 +2248,14 @@ kubectl get po -A --selector app=buoyant-cloud-agent Ensure the `buoyant-cloud-metrics` DaemonSet exists: ```bash -kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics ``` If the DaemonSet does not exist, the `linkerd-buoyant` installation may be missing or incomplete. 
To reinstall the extension: ```bash -linkerd-buoyant install | kubectl apply -f - +$ linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-metrics DaemonSet is running @@ -2271,7 +2271,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-metrics` DaemonSet with: ```bash -kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 163m buoyant-cloud-metrics-q8jhj 2/2 Running 0 163m @@ -2297,7 +2297,7 @@ Ensure the `buoyant-cloud-metrics` pods are injected, the `READY` column should show `2/2`: ```bash -kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 166m buoyant-cloud-metrics-q8jhj 2/2 Running 0 166m @@ -2319,7 +2319,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' +$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' {"app.kubernetes.io/name":"metrics","app.kubernetes.io/part-of":"buoyant-cloud","app.kubernetes.io/version":"v0.4.4"} ``` diff --git a/linkerd.io/content/2.18/tasks/troubleshooting.md b/linkerd.io/content/2.18/tasks/troubleshooting.md index 1fdeb9710b..a1a6ab73af 100644 --- a/linkerd.io/content/2.18/tasks/troubleshooting.md +++ b/linkerd.io/content/2.18/tasks/troubleshooting.md @@ -230,7 +230,7 @@ Example failure: Ensure the Linkerd ClusterRoles exist: ```bash -kubectl get clusterroles | grep linkerd +$ kubectl get clusterroles | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -240,7 +240,7 @@ linkerd-policy 
9d Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create clusterroles +$ kubectl auth can-i create clusterroles yes ``` @@ -257,7 +257,7 @@ Example failure: Ensure the Linkerd ClusterRoleBindings exist: ```bash -kubectl get clusterrolebindings | grep linkerd +$ kubectl get clusterrolebindings | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -267,7 +267,7 @@ linkerd-destination-policy 9d Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create clusterrolebindings +$ kubectl auth can-i create clusterrolebindings yes ``` @@ -284,7 +284,7 @@ Example failure: Ensure the Linkerd ServiceAccounts exist: ```bash -kubectl -n linkerd get serviceaccounts +$ kubectl -n linkerd get serviceaccounts NAME SECRETS AGE default 1 14m linkerd-destination 1 14m @@ -297,7 +297,7 @@ Also ensure you have permission to create ServiceAccounts in the Linkerd namespace: ```bash -kubectl -n linkerd auth can-i create serviceaccounts +$ kubectl -n linkerd auth can-i create serviceaccounts yes ``` @@ -314,7 +314,7 @@ Example failure: Ensure the Linkerd CRD exists: ```bash -kubectl get customresourcedefinitions +$ kubectl get customresourcedefinitions NAME CREATED AT serviceprofiles.linkerd.io 2019-04-25T21:47:31Z ``` @@ -322,7 +322,7 @@ serviceprofiles.linkerd.io 2019-04-25T21:47:31Z Also ensure you have permission to create CRDs: ```bash -kubectl auth can-i create customresourcedefinitions +$ kubectl auth can-i create customresourcedefinitions yes ``` @@ -339,14 +339,14 @@ Example failure: Ensure the Linkerd MutatingWebhookConfigurations exists: ```bash -kubectl get mutatingwebhookconfigurations | grep linkerd +$ kubectl get mutatingwebhookconfigurations | grep linkerd linkerd-proxy-injector-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create MutatingWebhookConfigurations: ```bash -kubectl auth can-i create 
mutatingwebhookconfigurations +$ kubectl auth can-i create mutatingwebhookconfigurations yes ``` @@ -363,14 +363,14 @@ Example failure: Ensure the Linkerd ValidatingWebhookConfiguration exists: ```bash -kubectl get validatingwebhookconfigurations | grep linkerd +$ kubectl get validatingwebhookconfigurations | grep linkerd linkerd-sp-validator-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create ValidatingWebhookConfigurations: ```bash -kubectl auth can-i create validatingwebhookconfigurations +$ kubectl auth can-i create validatingwebhookconfigurations yes ``` @@ -418,7 +418,7 @@ Example failure: Ensure the Linkerd ConfigMap exists: ```bash -kubectl -n linkerd get configmap/linkerd-config +$ kubectl -n linkerd get configmap/linkerd-config NAME DATA AGE linkerd-config 3 61m ``` @@ -426,7 +426,7 @@ linkerd-config 3 61m Also ensure you have permission to create ConfigMaps: ```bash -kubectl -n linkerd auth can-i create configmap +$ kubectl -n linkerd auth can-i create configmap yes ``` @@ -780,7 +780,7 @@ Example failure: Verify the state of the control plane pods with: ```bash -kubectl -n linkerd get po +$ kubectl -n linkerd get po NAME READY STATUS RESTARTS AGE linkerd-destination-5fd7b5d466-szgqm 2/2 Running 1 12m linkerd-identity-54df78c479-hbh5m 2/2 Running 0 12m @@ -1310,7 +1310,7 @@ Make sure multicluster extension is correctly installed and that the `links.multicluster.linkerd.io` CRD is present. ```bash -kubectl get crds | grep multicluster +$ kubectl get crds | grep multicluster NAME CREATED AT links.multicluster.linkerd.io 2021-03-10T09:58:10Z ``` @@ -1400,7 +1400,7 @@ the rules section. 
Expected rules for `linkerd-service-mirror-access-local-resources` cluster role: ```bash -kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml +$ kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml kind: ClusterRole metadata: labels: @@ -1466,7 +1466,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the controller pod with: ```bash -kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror +$ kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror NAME READY STATUS RESTARTS AGE linkerd-service-mirror-7bb8ff5967-zg265 2/2 Running 0 50m ``` @@ -1612,7 +1612,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoles exist: ```bash -kubectl get clusterroles | grep linkerd-viz +$ kubectl get clusterroles | grep linkerd-viz linkerd-linkerd-viz-metrics-api 2021-01-26T18:02:17Z linkerd-linkerd-viz-prometheus 2021-01-26T18:02:17Z linkerd-linkerd-viz-tap 2021-01-26T18:02:17Z @@ -1640,7 +1640,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoleBindings exist: ```bash -kubectl get clusterrolebindings | grep linkerd-viz +$ kubectl get clusterrolebindings | grep linkerd-viz linkerd-linkerd-viz-metrics-api ClusterRole/linkerd-linkerd-viz-metrics-api 18h linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 18h linkerd-linkerd-viz-tap ClusterRole/linkerd-linkerd-viz-tap 18h @@ -1652,7 +1652,7 @@ linkerd-linkerd-viz-web-check ClusterRole/linkerd-linke Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create clusterrolebindings +$ kubectl auth can-i create clusterrolebindings yes ``` @@ -1741,7 +1741,7 @@ requirements in the cluster: Ensure all the linkerd-viz pods are injected ```bash -kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE 
grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1765,7 +1765,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-viz pods are running with 2/2 ```bash -kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1789,12 +1789,12 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the prometheus related resources are present and running correctly. ```bash -❯ kubectl -n linkerd-viz get deploy,cm | grep prometheus +$ kubectl -n linkerd-viz get deploy,cm | grep prometheus deployment.apps/prometheus 1/1 1 1 3m18s configmap/prometheus-config 1 3m18s -❯ kubectl get clusterRoleBindings | grep prometheus +$ kubectl get clusterRoleBindings | grep prometheus linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 3m37s -❯ kubectl get clusterRoles | grep prometheus +$ kubectl get clusterRoles | grep prometheus linkerd-linkerd-viz-prometheus 2021-02-26T06:03:11Zh ``` @@ -1810,7 +1810,7 @@ Example failure: Verify that the metrics API pod is running correctly ```bash -❯ kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE metrics-api-7bb8cb8489-cbq4m 2/2 Running 0 4m58s tap-injector-6b9bc6fc4-cgbr4 2/2 Running 0 4m56s @@ -1948,7 +1948,7 @@ versions in sync by updating either the CLI or linkerd-jaeger as necessary. 
Ensure all the jaeger pods are injected ```bash -kubectl -n linkerd-jaeger get pods +$ kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE collector-69cc44dfbc-rhpfg 2/2 Running 0 11s jaeger-6f98d5c979-scqlq 2/2 Running 0 11s @@ -1969,7 +1969,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-jaeger pods are running with 2/2 ```bash -kubectl -n linkerd-jaeger get pods +$ kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE jaeger-injector-548684d74b-bcq5h 2/2 Running 0 5s collector-69cc44dfbc-wqf6s 2/2 Running 0 5s @@ -2018,7 +2018,7 @@ Ensure you can connect to the Linkerd Buoyant version check endpoint from the environment the `linkerd` cli is running: ```bash -curl https://buoyant.cloud/version.json +$ curl https://buoyant.cloud/version.json {"linkerd-buoyant":"v0.4.4"} ``` @@ -2083,7 +2083,7 @@ linkerd-buoyant install | kubectl apply -f - Ensure that the cluster role exists: ```bash -kubectl get clusterrole buoyant-cloud-agent +$ kubectl get clusterrole buoyant-cloud-agent NAME CREATED AT buoyant-cloud-agent 2020-11-13T00:59:50Z ``` @@ -2091,7 +2091,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create ClusterRoles +$ kubectl auth can-i create clusterroles yes ``` @@ -2106,7 +2106,7 @@ yes Ensure that the cluster role binding exists: ```bash -kubectl get clusterrolebinding buoyant-cloud-agent +$ kubectl get clusterrolebinding buoyant-cloud-agent NAME ROLE AGE buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d ``` @@ -2114,7 +2114,7 @@ buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create ClusterRoleBindings +$ kubectl auth can-i create clusterrolebindings yes ``` @@ -2129,7 +2129,7 @@ yes Ensure that the service account exists: ```bash -kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent +$
kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent NAME SECRETS AGE buoyant-cloud-agent 1 301d ``` @@ -2137,7 +2137,7 @@ buoyant-cloud-agent 1 301d Also ensure you have permission to create ServiceAccounts: ```bash -kubectl -n buoyant-cloud auth can-i create ServiceAccount +$ kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2152,7 +2152,7 @@ yes Ensure that the secret exists: ```bash -kubectl -n buoyant-cloud get secret buoyant-cloud-id +$ kubectl -n buoyant-cloud get secret buoyant-cloud-id NAME TYPE DATA AGE buoyant-cloud-id Opaque 4 301d ``` @@ -2160,7 +2160,7 @@ buoyant-cloud-id Opaque 4 301d Also ensure you have permission to create ServiceAccounts: ```bash -kubectl -n buoyant-cloud auth can-i create ServiceAccount +$ kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2175,14 +2175,14 @@ yes Ensure the `buoyant-cloud-agent` Deployment exists: ```bash -kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent +$ kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent ``` If the Deployment does not exist, the `linkerd-buoyant` installation may be missing or incomplete. To reinstall the extension: ```bash -linkerd-buoyant install | kubectl apply -f - +$ linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running @@ -2198,7 +2198,7 @@ everything to start up. 
If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-agent` Deployment with: ```bash -kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent +$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 156m ``` @@ -2221,7 +2221,7 @@ Ensure the `buoyant-cloud-agent` pod is injected, the `READY` column should show `2/2`: ```bash -kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent +$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 161m ``` @@ -2240,7 +2240,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -linkerd-buoyant version +$ linkerd-buoyant version CLI version: v0.4.4 Agent version: v0.4.4 ``` @@ -2248,7 +2248,7 @@ Agent version: v0.4.4 To update to the latest version: ```bash -linkerd-buoyant install | kubectl apply -f - +$ linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running a single pod @@ -2262,7 +2262,7 @@ linkerd-buoyant install | kubectl apply -f - `buoyant-cloud-agent` should run as a singleton. Check for other pods: ```bash -kubectl get po -A --selector app=buoyant-cloud-agent +$ kubectl get po -A --selector app=buoyant-cloud-agent ``` ### √ buoyant-cloud-metrics DaemonSet exists @@ -2276,14 +2276,14 @@ kubectl get po -A --selector app=buoyant-cloud-agent Ensure the `buoyant-cloud-metrics` DaemonSet exists: ```bash -kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics ``` If the DaemonSet does not exist, the `linkerd-buoyant` installation may be missing or incomplete. 
To reinstall the extension: ```bash -linkerd-buoyant install | kubectl apply -f - +$ linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-metrics DaemonSet is running @@ -2299,7 +2299,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-metrics` DaemonSet with: ```bash -kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 163m buoyant-cloud-metrics-q8jhj 2/2 Running 0 163m @@ -2325,7 +2325,7 @@ Ensure the `buoyant-cloud-metrics` pods are injected, the `READY` column should show `2/2`: ```bash -kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 166m buoyant-cloud-metrics-q8jhj 2/2 Running 0 166m @@ -2347,7 +2347,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' +$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' {"app.kubernetes.io/name":"metrics","app.kubernetes.io/part-of":"buoyant-cloud","app.kubernetes.io/version":"v0.4.4"} ``` diff --git a/linkerd.io/content/2.19/tasks/troubleshooting.md b/linkerd.io/content/2.19/tasks/troubleshooting.md index c65e8fb63c..f18644ca83 100644 --- a/linkerd.io/content/2.19/tasks/troubleshooting.md +++ b/linkerd.io/content/2.19/tasks/troubleshooting.md @@ -230,7 +230,7 @@ Example failure: Ensure the Linkerd ClusterRoles exist: ```bash -kubectl get clusterroles | grep linkerd +$ kubectl get clusterroles | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -240,7 +240,7 @@ linkerd-policy 
9d Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create clusterroles +$ kubectl auth can-i create clusterroles yes ``` @@ -257,7 +257,7 @@ Example failure: Ensure the Linkerd ClusterRoleBindings exist: ```bash -kubectl get clusterrolebindings | grep linkerd +$ kubectl get clusterrolebindings | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -267,7 +267,7 @@ linkerd-destination-policy 9d Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create clusterrolebindings +$ kubectl auth can-i create clusterrolebindings yes ``` @@ -284,7 +284,7 @@ Example failure: Ensure the Linkerd ServiceAccounts exist: ```bash -kubectl -n linkerd get serviceaccounts +$ kubectl -n linkerd get serviceaccounts NAME SECRETS AGE default 1 14m linkerd-destination 1 14m @@ -297,7 +297,7 @@ Also ensure you have permission to create ServiceAccounts in the Linkerd namespace: ```bash -kubectl -n linkerd auth can-i create serviceaccounts +$ kubectl -n linkerd auth can-i create serviceaccounts yes ``` @@ -314,7 +314,7 @@ Example failure: Ensure the Linkerd CRD exists: ```bash -kubectl get customresourcedefinitions +$ kubectl get customresourcedefinitions NAME CREATED AT serviceprofiles.linkerd.io 2019-04-25T21:47:31Z ``` @@ -322,7 +322,7 @@ serviceprofiles.linkerd.io 2019-04-25T21:47:31Z Also ensure you have permission to create CRDs: ```bash -kubectl auth can-i create customresourcedefinitions +$ kubectl auth can-i create customresourcedefinitions yes ``` @@ -339,14 +339,14 @@ Example failure: Ensure the Linkerd MutatingWebhookConfigurations exists: ```bash -kubectl get mutatingwebhookconfigurations | grep linkerd +$ kubectl get mutatingwebhookconfigurations | grep linkerd linkerd-proxy-injector-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create MutatingWebhookConfigurations: ```bash -kubectl auth can-i create 
mutatingwebhookconfigurations +$ kubectl auth can-i create mutatingwebhookconfigurations yes ``` @@ -363,14 +363,14 @@ Example failure: Ensure the Linkerd ValidatingWebhookConfiguration exists: ```bash -kubectl get validatingwebhookconfigurations | grep linkerd +$ kubectl get validatingwebhookconfigurations | grep linkerd linkerd-sp-validator-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create ValidatingWebhookConfigurations: ```bash -kubectl auth can-i create validatingwebhookconfigurations +$ kubectl auth can-i create validatingwebhookconfigurations yes ``` @@ -418,7 +418,7 @@ Example failure: Ensure the Linkerd ConfigMap exists: ```bash -kubectl -n linkerd get configmap/linkerd-config +$ kubectl -n linkerd get configmap/linkerd-config NAME DATA AGE linkerd-config 3 61m ``` @@ -426,7 +426,7 @@ linkerd-config 3 61m Also ensure you have permission to create ConfigMaps: ```bash -kubectl -n linkerd auth can-i create configmap +$ kubectl -n linkerd auth can-i create configmap yes ``` @@ -780,7 +780,7 @@ Example failure: Verify the state of the control plane pods with: ```bash -kubectl -n linkerd get po +$ kubectl -n linkerd get po NAME READY STATUS RESTARTS AGE linkerd-destination-5fd7b5d466-szgqm 2/2 Running 1 12m linkerd-identity-54df78c479-hbh5m 2/2 Running 0 12m @@ -862,7 +862,7 @@ Ensure you can connect to the Linkerd version check endpoint from the environment the `linkerd` cli is running: ```bash -curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" +$ curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" {"stable":"stable-2.1.0","edge":"edge-19.1.2"} ``` @@ -1310,7 +1310,7 @@ Make sure multicluster extension is correctly installed and that the `links.multicluster.linkerd.io` CRD is present. 
```bash -kubectl get crds | grep multicluster +$ kubectl get crds | grep multicluster NAME CREATED AT links.multicluster.linkerd.io 2021-03-10T09:58:10Z ``` @@ -1400,7 +1400,7 @@ the rules section. Expected rules for `linkerd-service-mirror-access-local-resources` cluster role: ```bash -kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml +$ kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml kind: ClusterRole metadata: labels: @@ -1466,7 +1466,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the controller pod with: ```bash -kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror +$ kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror NAME READY STATUS RESTARTS AGE linkerd-service-mirror-7bb8ff5967-zg265 2/2 Running 0 50m ``` @@ -1612,7 +1612,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoles exist: ```bash -kubectl get clusterroles | grep linkerd-viz +$ kubectl get clusterroles | grep linkerd-viz linkerd-linkerd-viz-metrics-api 2021-01-26T18:02:17Z linkerd-linkerd-viz-prometheus 2021-01-26T18:02:17Z linkerd-linkerd-viz-tap 2021-01-26T18:02:17Z @@ -1640,7 +1640,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoleBindings exist: ```bash -kubectl get clusterrolebindings | grep linkerd-viz +$ kubectl get clusterrolebindings | grep linkerd-viz linkerd-linkerd-viz-metrics-api ClusterRole/linkerd-linkerd-viz-metrics-api 18h linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 18h linkerd-linkerd-viz-tap ClusterRole/linkerd-linkerd-viz-tap 18h @@ -1652,7 +1652,7 @@ linkerd-linkerd-viz-web-check ClusterRole/linkerd-linke Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create clusterrolebindings +$ kubectl auth can-i create clusterrolebindings yes ``` @@ 
-1741,7 +1741,7 @@ requirements in the cluster: Ensure all the linkerd-viz pods are injected ```bash -kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1765,7 +1765,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-viz pods are running with 2/2 ```bash -kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1789,12 +1789,12 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the prometheus related resources are present and running correctly. ```bash -❯ kubectl -n linkerd-viz get deploy,cm | grep prometheus +$ kubectl -n linkerd-viz get deploy,cm | grep prometheus deployment.apps/prometheus 1/1 1 1 3m18s configmap/prometheus-config 1 3m18s -❯ kubectl get clusterRoleBindings | grep prometheus +$ kubectl get clusterRoleBindings | grep prometheus linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 3m37s -❯ kubectl get clusterRoles | grep prometheus +$ kubectl get clusterRoles | grep prometheus linkerd-linkerd-viz-prometheus 2021-02-26T06:03:11Zh ``` @@ -1810,7 +1810,7 @@ Example failure: Verify that the metrics API pod is running correctly ```bash -❯ kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE metrics-api-7bb8cb8489-cbq4m 2/2 Running 0 4m58s tap-injector-6b9bc6fc4-cgbr4 2/2 Running 0 4m56s @@ -1936,7 +1936,7 @@ Ensure you can connect to the Linkerd Buoyant version check endpoint from the environment the `linkerd` cli is running: ```bash -curl https://buoyant.cloud/version.json +$ curl https://buoyant.cloud/version.json {"linkerd-buoyant":"v0.4.4"} ``` @@ -2001,7 +2001,7 @@ linkerd-buoyant install | kubectl apply -f - Ensure that the cluster role 
exists: ```bash -kubectl get clusterrole buoyant-cloud-agent +$ kubectl get clusterrole buoyant-cloud-agent NAME CREATED AT buoyant-cloud-agent 2020-11-13T00:59:50Z ``` @@ -2009,7 +2009,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create ClusterRoles +$ kubectl auth can-i create clusterroles yes ``` @@ -2024,7 +2024,7 @@ yes Ensure that the cluster role binding exists: ```bash -kubectl get clusterrolebinding buoyant-cloud-agent +$ kubectl get clusterrolebinding buoyant-cloud-agent NAME ROLE AGE buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d ``` @@ -2032,7 +2032,7 @@ buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create ClusterRoleBindings +$ kubectl auth can-i create ClusterRoleBindings yes ``` @@ -2047,7 +2047,7 @@ yes Ensure that the service account exists: ```bash -kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent +$ kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent NAME SECRETS AGE buoyant-cloud-agent 1 301d ``` @@ -2055,7 +2055,7 @@ buoyant-cloud-agent 1 301d Also ensure you have permission to create ServiceAccounts: ```bash -kubectl -n buoyant-cloud auth can-i create ServiceAccount +$ kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2070,7 +2070,7 @@ yes Ensure that the secret exists: ```bash -kubectl -n buoyant-cloud get secret buoyant-cloud-id +$ kubectl -n buoyant-cloud get secret buoyant-cloud-id NAME TYPE DATA AGE buoyant-cloud-id Opaque 4 301d ``` @@ -2078,7 +2078,7 @@ buoyant-cloud-id Opaque 4 301d Also ensure you have permission to create ServiceAccounts: ```bash -kubectl -n buoyant-cloud auth can-i create ServiceAccount +$ kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2093,14 +2093,14 @@ yes Ensure the `buoyant-cloud-agent` Deployment exists: ```bash -kubectl -n buoyant-cloud get 
deploy/buoyant-cloud-agent +$ kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent ``` If the Deployment does not exist, the `linkerd-buoyant` installation may be missing or incomplete. To reinstall the extension: ```bash -linkerd-buoyant install | kubectl apply -f - +$ linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running @@ -2116,7 +2116,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-agent` Deployment with: ```bash -kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent +$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 156m ``` @@ -2139,7 +2139,7 @@ Ensure the `buoyant-cloud-agent` pod is injected, the `READY` column should show `2/2`: ```bash -kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent +$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 161m ``` @@ -2158,7 +2158,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -linkerd-buoyant version +$ linkerd-buoyant version CLI version: v0.4.4 Agent version: v0.4.4 ``` @@ -2166,7 +2166,7 @@ Agent version: v0.4.4 To update to the latest version: ```bash -linkerd-buoyant install | kubectl apply -f - +$ linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running a single pod @@ -2180,7 +2180,7 @@ linkerd-buoyant install | kubectl apply -f - `buoyant-cloud-agent` should run as a singleton. 
Check for other pods: ```bash -kubectl get po -A --selector app=buoyant-cloud-agent +$ kubectl get po -A --selector app=buoyant-cloud-agent ``` ### √ buoyant-cloud-metrics DaemonSet exists @@ -2194,14 +2194,14 @@ kubectl get po -A --selector app=buoyant-cloud-agent Ensure the `buoyant-cloud-metrics` DaemonSet exists: ```bash -kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics ``` If the DaemonSet does not exist, the `linkerd-buoyant` installation may be missing or incomplete. To reinstall the extension: ```bash -linkerd-buoyant install | kubectl apply -f - +$ linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-metrics DaemonSet is running @@ -2217,7 +2217,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-metrics` DaemonSet with: ```bash -kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 163m buoyant-cloud-metrics-q8jhj 2/2 Running 0 163m @@ -2243,7 +2243,7 @@ Ensure the `buoyant-cloud-metrics` pods are injected, the `READY` column should show `2/2`: ```bash -kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 166m buoyant-cloud-metrics-q8jhj 2/2 Running 0 166m @@ -2265,7 +2265,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' +$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' {"app.kubernetes.io/name":"metrics","app.kubernetes.io/part-of":"buoyant-cloud","app.kubernetes.io/version":"v0.4.4"} ``` 
From 799dd236fc3bc7b617f41ca8f0f5eeb98bb76a24 Mon Sep 17 00:00:00 2001 From: bezarsnba Date: Sat, 31 Jan 2026 12:55:55 -0300 Subject: [PATCH 09/31] revision files restricting-access Signed-off-by: bezarsnba --- .../content/2-edge/tasks/restricting-access.md | 10 +++++----- .../content/2.16/tasks/restricting-access.md | 12 ++++++------ .../content/2.17/tasks/restricting-access.md | 12 ++++++------ .../content/2.18/tasks/restricting-access.md | 12 ++++++------ .../content/2.19/tasks/restricting-access.md | 16 ++++++++-------- 5 files changed, 31 insertions(+), 31 deletions(-) diff --git a/linkerd.io/content/2-edge/tasks/restricting-access.md b/linkerd.io/content/2-edge/tasks/restricting-access.md index c9850725f7..9587079da8 100644 --- a/linkerd.io/content/2-edge/tasks/restricting-access.md +++ b/linkerd.io/content/2-edge/tasks/restricting-access.md @@ -35,7 +35 +7 @@ Linkerd custom resource which describes a specific port of a workload. Once the access it (we'll see how to authorize clients in a moment). 
```bash -kubectl apply -f - < linkerd viz authz -n emojivoto deploy/voting +$ linkerd viz authz -n emojivoto deploy/voting ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated default/all-unauthenticated 0.0rps 100.00% 0.1rps 1ms 1ms 1ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.2rps 1ms 1ms 1ms @@ -123,7 +123,7 @@ We can also test that request from other pods will be rejected by creating a `grpcurl` pod and attempting to access the Voting service from it: ```bash -> kubectl run grpcurl --rm -it --image=networld/grpcurl --restart=Never --command -- ./grpcurl -plaintext voting-svc.emojivoto:8080 emojivoto.v1.VotingService/VoteDog +$ kubectl run grpcurl --rm -it --image=networld/grpcurl --restart=Never --command -- ./grpcurl -plaintext voting-svc.emojivoto:8080 emojivoto.v1.VotingService/VoteDog Error invoking method "emojivoto.v1.VotingService/VoteDog": failed to query for service descriptor "emojivoto.v1.VotingService": rpc error: code = PermissionDenied desc = pod "grpcurl" deleted pod default/grpcurl terminated (Error) @@ -153,7 +153,7 @@ following logic when deciding whether to allow a request: We can set the default policy to `deny` using the `linkerd upgrade` command: ```bash -> linkerd upgrade --default-inbound-policy deny | kubectl apply -f - +$ linkerd upgrade --default-inbound-policy deny | kubectl apply -f - ``` Alternatively, default policies can be set on individual workloads or namespaces diff --git a/linkerd.io/content/2.16/tasks/restricting-access.md b/linkerd.io/content/2.16/tasks/restricting-access.md index c9850725f7..d949af35f6 100644 --- a/linkerd.io/content/2.16/tasks/restricting-access.md +++ b/linkerd.io/content/2.16/tasks/restricting-access.md @@ -35,7 +35,7 @@ Linkerd custom resource which describes a specific port of a workload. Once the access it (we'll see how to authorize clients in a moment). 
```bash -kubectl apply -f - < linkerd viz authz -n emojivoto deploy/voting +$ linkerd viz authz -n emojivoto deploy/voting ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated default/all-unauthenticated 0.0rps 100.00% 0.1rps 1ms 1ms 1ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.2rps 1ms 1ms 1ms @@ -84,7 +84,7 @@ to the Voting `Server` we created above. Note that meshed mTLS uses based on `ServiceAccounts`. ```bash -kubectl apply -f - < linkerd viz authz -n emojivoto deploy/voting +$ linkerd viz authz -n emojivoto deploy/voting ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated default/all-unauthenticated 0.0rps 100.00% 0.1rps 1ms 1ms 1ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.2rps 1ms 1ms 1ms @@ -123,7 +123,7 @@ We can also test that request from other pods will be rejected by creating a `grpcurl` pod and attempting to access the Voting service from it: ```bash -> kubectl run grpcurl --rm -it --image=networld/grpcurl --restart=Never --command -- ./grpcurl -plaintext voting-svc.emojivoto:8080 emojivoto.v1.VotingService/VoteDog +$ kubectl run grpcurl --rm -it --image=networld/grpcurl --restart=Never --command -- ./grpcurl -plaintext voting-svc.emojivoto:8080 emojivoto.v1.VotingService/VoteDog Error invoking method "emojivoto.v1.VotingService/VoteDog": failed to query for service descriptor "emojivoto.v1.VotingService": rpc error: code = PermissionDenied desc = pod "grpcurl" deleted pod default/grpcurl terminated (Error) @@ -153,7 +153,7 @@ following logic when deciding whether to allow a request: We can set the default policy to `deny` using the `linkerd upgrade` command: ```bash -> linkerd upgrade --default-inbound-policy deny | kubectl apply -f - +linkerd upgrade --default-inbound-policy deny | kubectl apply -f - ``` Alternatively, default policies can be set on individual 
workloads or namespaces diff --git a/linkerd.io/content/2.17/tasks/restricting-access.md b/linkerd.io/content/2.17/tasks/restricting-access.md index c9850725f7..d949af35f6 100644 --- a/linkerd.io/content/2.17/tasks/restricting-access.md +++ b/linkerd.io/content/2.17/tasks/restricting-access.md @@ -35,7 +35,7 @@ Linkerd custom resource which describes a specific port of a workload. Once the access it (we'll see how to authorize clients in a moment). ```bash -kubectl apply -f - < linkerd viz authz -n emojivoto deploy/voting +$ linkerd viz authz -n emojivoto deploy/voting ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated default/all-unauthenticated 0.0rps 100.00% 0.1rps 1ms 1ms 1ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.2rps 1ms 1ms 1ms @@ -84,7 +84,7 @@ to the Voting `Server` we created above. Note that meshed mTLS uses based on `ServiceAccounts`. ```bash -kubectl apply -f - < linkerd viz authz -n emojivoto deploy/voting +$ linkerd viz authz -n emojivoto deploy/voting ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated default/all-unauthenticated 0.0rps 100.00% 0.1rps 1ms 1ms 1ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.2rps 1ms 1ms 1ms @@ -123,7 +123,7 @@ We can also test that request from other pods will be rejected by creating a `grpcurl` pod and attempting to access the Voting service from it: ```bash -> kubectl run grpcurl --rm -it --image=networld/grpcurl --restart=Never --command -- ./grpcurl -plaintext voting-svc.emojivoto:8080 emojivoto.v1.VotingService/VoteDog +$ kubectl run grpcurl --rm -it --image=networld/grpcurl --restart=Never --command -- ./grpcurl -plaintext voting-svc.emojivoto:8080 emojivoto.v1.VotingService/VoteDog Error invoking method "emojivoto.v1.VotingService/VoteDog": failed to query for service descriptor "emojivoto.v1.VotingService": rpc error: 
code = PermissionDenied desc = pod "grpcurl" deleted pod default/grpcurl terminated (Error) @@ -153,7 +153,7 @@ following logic when deciding whether to allow a request: We can set the default policy to `deny` using the `linkerd upgrade` command: ```bash -> linkerd upgrade --default-inbound-policy deny | kubectl apply -f - +linkerd upgrade --default-inbound-policy deny | kubectl apply -f - ``` Alternatively, default policies can be set on individual workloads or namespaces diff --git a/linkerd.io/content/2.18/tasks/restricting-access.md b/linkerd.io/content/2.18/tasks/restricting-access.md index c9850725f7..20dba47a35 100644 --- a/linkerd.io/content/2.18/tasks/restricting-access.md +++ b/linkerd.io/content/2.18/tasks/restricting-access.md @@ -35,7 +35,7 @@ Linkerd custom resource which describes a specific port of a workload. Once the access it (we'll see how to authorize clients in a moment). ```bash -kubectl apply -f - < linkerd viz authz -n emojivoto deploy/voting +$ linkerd viz authz -n emojivoto deploy/voting ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated default/all-unauthenticated 0.0rps 100.00% 0.1rps 1ms 1ms 1ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.2rps 1ms 1ms 1ms @@ -84,7 +84,7 @@ to the Voting `Server` we created above. Note that meshed mTLS uses based on `ServiceAccounts`. 
```bash -kubectl apply -f - < linkerd viz authz -n emojivoto deploy/voting +$ linkerd viz authz -n emojivoto deploy/voting ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated default/all-unauthenticated 0.0rps 100.00% 0.1rps 1ms 1ms 1ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.2rps 1ms 1ms 1ms @@ -123,7 +123,7 @@ We can also test that request from other pods will be rejected by creating a `grpcurl` pod and attempting to access the Voting service from it: ```bash -> kubectl run grpcurl --rm -it --image=networld/grpcurl --restart=Never --command -- ./grpcurl -plaintext voting-svc.emojivoto:8080 emojivoto.v1.VotingService/VoteDog +$ kubectl run grpcurl --rm -it --image=networld/grpcurl --restart=Never --command -- ./grpcurl -plaintext voting-svc.emojivoto:8080 emojivoto.v1.VotingService/VoteDog Error invoking method "emojivoto.v1.VotingService/VoteDog": failed to query for service descriptor "emojivoto.v1.VotingService": rpc error: code = PermissionDenied desc = pod "grpcurl" deleted pod default/grpcurl terminated (Error) @@ -153,7 +153,7 @@ following logic when deciding whether to allow a request: We can set the default policy to `deny` using the `linkerd upgrade` command: ```bash -> linkerd upgrade --default-inbound-policy deny | kubectl apply -f - +$ linkerd upgrade --default-inbound-policy deny | kubectl apply -f - ``` Alternatively, default policies can be set on individual workloads or namespaces diff --git a/linkerd.io/content/2.19/tasks/restricting-access.md b/linkerd.io/content/2.19/tasks/restricting-access.md index c9850725f7..ee1a438573 100644 --- a/linkerd.io/content/2.19/tasks/restricting-access.md +++ b/linkerd.io/content/2.19/tasks/restricting-access.md @@ -21,9 +21,9 @@ haven't already done this. 
Inject and install the Emojivoto application: ```bash -linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - +$ linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - ... -linkerd check -n emojivoto --proxy -o short +$ linkerd check -n emojivoto --proxy -o short ... ``` @@ -35,7 +35,7 @@ Linkerd custom resource which describes a specific port of a workload. Once the access it (we'll see how to authorize clients in a moment). ```bash -kubectl apply -f - < linkerd viz authz -n emojivoto deploy/voting +$ linkerd viz authz -n emojivoto deploy/voting ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated default/all-unauthenticated 0.0rps 100.00% 0.1rps 1ms 1ms 1ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.2rps 1ms 1ms 1ms @@ -84,7 +84,7 @@ to the Voting `Server` we created above. Note that meshed mTLS uses based on `ServiceAccounts`. ```bash -kubectl apply -f - < linkerd viz authz -n emojivoto deploy/voting +$ linkerd viz authz -n emojivoto deploy/voting ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated default/all-unauthenticated 0.0rps 100.00% 0.1rps 1ms 1ms 1ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.2rps 1ms 1ms 1ms @@ -123,7 +123,7 @@ We can also test that request from other pods will be rejected by creating a `grpcurl` pod and attempting to access the Voting service from it: ```bash -> kubectl run grpcurl --rm -it --image=networld/grpcurl --restart=Never --command -- ./grpcurl -plaintext voting-svc.emojivoto:8080 emojivoto.v1.VotingService/VoteDog +$ kubectl run grpcurl --rm -it --image=networld/grpcurl --restart=Never --command -- ./grpcurl -plaintext voting-svc.emojivoto:8080 emojivoto.v1.VotingService/VoteDog Error invoking method "emojivoto.v1.VotingService/VoteDog": failed to query for service descriptor "emojivoto.v1.VotingService": 
rpc error: code = PermissionDenied desc = pod "grpcurl" deleted pod default/grpcurl terminated (Error) @@ -153,7 +153,7 @@ following logic when deciding whether to allow a request: We can set the default policy to `deny` using the `linkerd upgrade` command: ```bash -> linkerd upgrade --default-inbound-policy deny | kubectl apply -f - +linkerd upgrade --default-inbound-policy deny | kubectl apply -f - ``` Alternatively, default policies can be set on individual workloads or namespaces From 970d2ce38bb64cda9a32922e673dc8b71505886b Mon Sep 17 00:00:00 2001 From: bezarsnba Date: Sat, 31 Jan 2026 12:57:21 -0300 Subject: [PATCH 10/31] revision _index.md Signed-off-by: bezarsnba --- linkerd.io/content/2-edge/getting-started/_index.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/linkerd.io/content/2-edge/getting-started/_index.md b/linkerd.io/content/2-edge/getting-started/_index.md index cf8c40d543..ef3519b59c 100644 --- a/linkerd.io/content/2-edge/getting-started/_index.md +++ b/linkerd.io/content/2-edge/getting-started/_index.md @@ -60,8 +60,8 @@ To install the CLI manually, run: ```bash # Setting LINKERD2_VERSION sets the version to install. # If unset, you'll get the latest available edge version. 
-$ export LINKERD2_VERSION={{< edge-version >}} -$ curl --proto '=https' --tlsv1.2 -sSfL https://run.linkerd.io/install-edge | sh +export LINKERD2_VERSION={{< edge-version >}} +curl --proto '=https' --tlsv1.2 -sSfL https://run.linkerd.io/install-edge | sh ``` Be sure to follow the instructions to add it to your path: From 90bdda4860042efb2cde1b270946f6f4e7814272 Mon Sep 17 00:00:00 2001 From: beza Date: Sat, 31 Jan 2026 13:14:23 -0300 Subject: [PATCH 11/31] Apply suggestion from @kflynn Co-authored-by: Flynn --- linkerd.io/content/2-edge/tasks/managing-egress-traffic.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md b/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md index d851205248..6785aca84d 100644 --- a/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md @@ -472,7 +472,7 @@ curl http://httpbin.org/get } # encrypted traffic can target all paths and hosts -curl https://httpbin.org/ip +curl https://httpbin.org/ip { "origin": "51.116.126.217" } From 958f5f3352298f4dc3c2d9c1b7461f99c8a2b9b9 Mon Sep 17 00:00:00 2001 From: beza Date: Sat, 31 Jan 2026 13:18:24 -0300 Subject: [PATCH 12/31] Apply suggestion from @kflynn Co-authored-by: Flynn --- .../content/2-edge/tasks/multicluster-using-statefulsets.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linkerd.io/content/2-edge/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2-edge/tasks/multicluster-using-statefulsets.md index 645a5619d9..a2728e461b 100644 --- a/linkerd.io/content/2-edge/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2-edge/tasks/multicluster-using-statefulsets.md @@ -185,7 +185,7 @@ If we now curl one of these instances, we will get back a response. 
```sh # exec'd on the pod -/ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local +curl nginx-set-0.nginx-svc.default.svc.west.cluster.local " From 2675fc2fb997788f50231c0f881aefc6aa2976d0 Mon Sep 17 00:00:00 2001 From: bezarsnba Date: Sat, 31 Jan 2026 13:18:51 -0300 Subject: [PATCH 13/31] revision files edge2 Signed-off-by: bezarsnba --- .../2-edge/tasks/configuring-per-route-policy.md | 10 +++++----- .../content/2-edge/tasks/managing-egress-traffic.md | 8 ++++---- linkerd.io/content/2-edge/tasks/restricting-access.md | 8 ++++---- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/linkerd.io/content/2-edge/tasks/configuring-per-route-policy.md b/linkerd.io/content/2-edge/tasks/configuring-per-route-policy.md index aaa3a9b43d..a62c218b47 100644 --- a/linkerd.io/content/2-edge/tasks/configuring-per-route-policy.md +++ b/linkerd.io/content/2-edge/tasks/configuring-per-route-policy.md @@ -87,7 +87,7 @@ First, let's run the `linkerd viz authz` command to list the authorization resources that currently exist for the `authors` deployment: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated default/all-unauthenticated 0.0rps 70.31% 8.1rps 1ms 43ms 49ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.3rps 1ms 1ms 1ms @@ -124,7 +124,7 @@ Now that we've defined a [`Server`] for the authors `Deployment`, we can run the currently unauthorized: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default authors-server 9.5rps 0.00% 0.0rps 0ms 0ms 0ms probe authors-server default/probe 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -312,7 +312,7 @@ network (0.0.0.0). 
Running `linkerd viz authz` again, we can now see that our new policies exist: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 2ms 2ms 2ms authors-probe-route authors-server authorizationpolicy/authors-probe-policy 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -383,7 +383,7 @@ requests, but we haven't _authorized_ requests to that route. Running the requests to `authors-modify-route`: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy - - - - - - authors-modify-route authors-server 9.7rps 0.00% 0.0rps 0ms 0ms 0ms @@ -442,7 +442,7 @@ Running the `linkerd viz authz` command one last time, we now see that all traffic is authorized: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 0ms 0ms 0ms authors-modify-route authors-server authorizationpolicy/authors-modify-policy 0.0rps 100.00% 0.0rps 0ms 0ms 0ms diff --git a/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md b/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md index 6785aca84d..57142086b8 100644 --- a/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md @@ -235,7 +235,7 @@ Interestingly enough though, if we go back to our client shell and we try to initiate HTTPS traffic to the same service, it will not be allowed: ```bash -curl -v https://httpbin.org/get +$ curl -v 
https://httpbin.org/get curl: (35) TLS connect error: error:00000000:lib(0)::reason(0) ``` @@ -458,7 +458,7 @@ Now let's verify all works as expected: ```bash # plaintext traffic goes as expected to the /get path -curl http://httpbin.org/get +$ curl http://httpbin.org/get { "args": {}, "headers": { @@ -472,14 +472,14 @@ curl http://httpbin.org/get } # encrypted traffic can target all paths and hosts -curl https://httpbin.org/ip +$ curl https://httpbin.org/ip { "origin": "51.116.126.217" } # arbitrary unencrypted traffic goes to the internal service -curl http://google.com +$ curl http://google.com { "requestUID": "in:http-sid:terminus-grpc:-1-h1:80-190120723", "payload": "You cannot go there right now"} diff --git a/linkerd.io/content/2-edge/tasks/restricting-access.md b/linkerd.io/content/2-edge/tasks/restricting-access.md index 9587079da8..ee1a438573 100644 --- a/linkerd.io/content/2-edge/tasks/restricting-access.md +++ b/linkerd.io/content/2-edge/tasks/restricting-access.md @@ -21,9 +21,9 @@ haven't already done this. Inject and install the Emojivoto application: ```bash -linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - +$ linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - ... -linkerd check -n emojivoto --proxy -o short +$ linkerd check -n emojivoto --proxy -o short ...
``` @@ -68,7 +68,7 @@ of requests coming to the voting service and see that all incoming requests to the voting-grpc server are currently unauthorized: ```bash -> linkerd viz authz -n emojivoto deploy/voting +$ linkerd viz authz -n emojivoto deploy/voting ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated default/all-unauthenticated 0.0rps 100.00% 0.1rps 1ms 1ms 1ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.2rps 1ms 1ms 1ms @@ -153,7 +153,7 @@ following logic when deciding whether to allow a request: We can set the default policy to `deny` using the `linkerd upgrade` command: ```bash -$ linkerd upgrade --default-inbound-policy deny | kubectl apply -f - +linkerd upgrade --default-inbound-policy deny | kubectl apply -f - ``` Alternatively, default policies can be set on individual workloads or namespaces From 43d883c3c965705313d7551f4c61ff803f5a2d33 Mon Sep 17 00:00:00 2001 From: bezarsnba Date: Sat, 31 Jan 2026 17:00:27 -0300 Subject: [PATCH 14/31] revison multicluster security, troubleshot and others Signed-off-by: bezarsnba --- .../tasks/multicluster-using-statefulsets.md | 40 ++--- .../content/2-edge/tasks/multicluster.md | 4 +- .../2-edge/tasks/securing-linkerd-tap.md | 16 +- .../content/2-edge/tasks/troubleshooting.md | 90 +++++------ .../content/2.10/tasks/troubleshooting.md | 40 ++--- .../tasks/multicluster-using-statefulsets.md | 47 +++--- .../content/2.11/tasks/troubleshooting.md | 36 ++--- .../tasks/configuring-per-route-policy.md | 10 +- .../tasks/multicluster-using-statefulsets.md | 42 ++--- .../2.12/tasks/securing-linkerd-tap.md | 16 +- .../content/2.12/tasks/troubleshooting.md | 52 +++---- .../configuring-dynamic-request-routing.md | 8 +- .../tasks/configuring-per-route-policy.md | 10 +- .../tasks/multicluster-using-statefulsets.md | 10 +- .../content/2.13/tasks/troubleshooting.md | 2 +- .../configuring-dynamic-request-routing.md | 6 +- 
.../tasks/configuring-per-route-policy.md | 10 +- .../tasks/multicluster-using-statefulsets.md | 40 ++--- .../content/2.14/tasks/restricting-access.md | 12 +- .../2.14/tasks/securing-linkerd-tap.md | 16 +- .../content/2.14/tasks/troubleshooting.md | 8 +- .../configuring-dynamic-request-routing.md | 6 +- .../tasks/configuring-per-route-policy.md | 10 +- .../tasks/multicluster-using-statefulsets.md | 14 +- linkerd.io/content/2.15/tasks/multicluster.md | 4 +- .../content/2.15/tasks/restricting-access.md | 16 +- .../2.15/tasks/securing-linkerd-tap.md | 16 +- .../content/2.15/tasks/troubleshooting.md | 6 +- .../configuring-dynamic-request-routing.md | 6 +- .../tasks/configuring-per-route-policy.md | 10 +- .../tasks/multicluster-using-statefulsets.md | 40 ++--- .../content/2.16/tasks/restricting-access.md | 6 +- .../2.16/tasks/securing-linkerd-tap.md | 16 +- .../content/2.16/tasks/troubleshooting.md | 145 +++++++----------- .../configuring-dynamic-request-routing.md | 6 +- .../tasks/configuring-per-route-policy.md | 10 +- .../2.17/tasks/managing-egress-traffic.md | 14 +- .../tasks/multicluster-using-statefulsets.md | 40 ++--- .../content/2.17/tasks/restricting-access.md | 8 +- .../2.17/tasks/securing-linkerd-tap.md | 16 +- .../content/2.17/tasks/troubleshooting.md | 30 +++- .../configuring-dynamic-request-routing.md | 6 +- .../tasks/configuring-per-route-policy.md | 10 +- .../2.18/tasks/managing-egress-traffic.md | 18 +-- .../tasks/multicluster-using-statefulsets.md | 38 ++--- linkerd.io/content/2.18/tasks/multicluster.md | 2 +- .../content/2.18/tasks/restricting-access.md | 10 +- .../2.18/tasks/securing-linkerd-tap.md | 16 +- .../content/2.18/tasks/troubleshooting.md | 2 +- .../configuring-dynamic-request-routing.md | 6 +- .../tasks/configuring-per-route-policy.md | 10 +- .../2.19/tasks/managing-egress-traffic.md | 18 +-- .../tasks/multicluster-using-statefulsets.md | 42 ++--- .../2.19/tasks/securing-linkerd-tap.md | 16 +- 54 files changed, 563 insertions(+), 565 
deletions(-) diff --git a/linkerd.io/content/2-edge/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2-edge/tasks/multicluster-using-statefulsets.md index a2728e461b..e486a81d78 100644 --- a/linkerd.io/content/2-edge/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2-edge/tasks/multicluster-using-statefulsets.md @@ -60,10 +60,10 @@ everything. ```sh # create k3d clusters -./create.sh +$ ./create.sh # list the clusters -k3d cluster list +$ k3d cluster list NAME SERVERS AGENTS LOADBALANCER east 1/1 0/0 true west 1/1 0/0 true @@ -77,10 +77,10 @@ controllers and links are generated for both clusters. ```sh # Install Linkerd and multicluster, output to check should be a success -./install.sh +$ ./install.sh # Next, link the two clusters together -./link.sh +$ ./link.sh ``` Perfect! If you've made it this far with no errors, then it's a good sign. In @@ -100,17 +100,17 @@ communication. First, we will deploy our pods and services: ```sh # deploy services and mesh namespaces -./deploy.sh +$ ./deploy.sh # verify both clusters # # verify east -kubectl --context=k3d-east get pods +$ kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 7s # verify west has headless service -kubectl --context=k3d-west get services +$ kubectl --context=k3d-west get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 10m nginx-svc ClusterIP None 80/TCP 8s @@ -118,7 +118,7 @@ nginx-svc ClusterIP None 80/TCP 8s # verify west has statefulset # # this may take a while to come up -kubectl --context=k3d-west get pods +$ kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 53s nginx-set-1 2/2 Running 0 43s @@ -129,7 +129,7 @@ Before we go further, let's have a look at the endpoints object for the `nginx-svc`: ```sh -kubectl --context=k3d-west get endpoints nginx-svc -o yaml +$ kubectl --context=k3d-west get endpoints nginx-svc -o yaml ... 
subsets: - addresses: @@ -169,23 +169,23 @@ would get an answer back. We can test this out by applying the curl pod to the `west` cluster: ```sh -kubectl --context=k3d-west apply -f east/curl.yml -kubectl --context=k3d-west get pods +$ kubectl --context=k3d-west apply -f east/curl.yml +$ kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 5m8s nginx-set-1 2/2 Running 0 4m58s nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s -kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh -/$ # prompt for curl pod +$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh +/$ # prompt for curl pod ``` If we now curl one of these instances, we will get back a response. ```sh # exec'd on the pod -curl nginx-set-0.nginx-svc.default.svc.west.cluster.local +$ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local " @@ -217,7 +217,7 @@ Now, let's do the same, but this time from the `east` cluster. We will first export the service. ```sh -kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" +$ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled kubectl --context=k3d-east get services @@ -234,7 +234,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname will point to one of the services we see above: ```sh -kubectl --context=k3d-east get endpoints nginx-svc-k3d-west -o yaml +$ kubectl --context=k3d-east get endpoints nginx-svc-k3d-west -o yaml subsets: - addresses: - hostname: nginx-set-0 @@ -250,7 +250,7 @@ cluster (`west`), will be mirrored as a clusterIP service. We will see in a second why this matters.
```sh -kubectl --context=k3d-east get pods +$ kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m @@ -260,7 +260,7 @@ kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh # however, the service and cluster domain will now be different, since we # are in a different cluster. # -curl nginx-set-0.nginx-svc-k3d-west.default.svc.east.cluster.local +$ curl nginx-set-0.nginx-svc-k3d-west.default.svc.east.cluster.local @@ -328,8 +328,8 @@ validation. To clean-up, you can remove both clusters entirely using the k3d CLI: ```sh -k3d cluster delete east +$ k3d cluster delete east cluster east deleted -k3d cluster delete west +$ k3d cluster delete west cluster west deleted ``` diff --git a/linkerd.io/content/2-edge/tasks/multicluster.md b/linkerd.io/content/2-edge/tasks/multicluster.md index 2779b7616a..3a80b3f3ed 100644 --- a/linkerd.io/content/2-edge/tasks/multicluster.md +++ b/linkerd.io/content/2-edge/tasks/multicluster.md @@ -506,9 +506,9 @@ To cleanup the multicluster control plane, you can run: ```bash # Delete the link CR -kubectl --context=west -n linkerd-multicluster delete links east +$ kubectl --context=west -n linkerd-multicluster delete links east # Delete the test namespace and uninstall multicluster -for ctx in west east; do \ +$ for ctx in west east; do \ kubectl --context=${ctx} delete ns test; \ linkerd --context=${ctx} multicluster uninstall | kubectl --context=${ctx} delete -f - ; \ done diff --git a/linkerd.io/content/2-edge/tasks/securing-linkerd-tap.md b/linkerd.io/content/2-edge/tasks/securing-linkerd-tap.md index 639f81692f..8a802c890c 100644 --- a/linkerd.io/content/2-edge/tasks/securing-linkerd-tap.md +++ b/linkerd.io/content/2-edge/tasks/securing-linkerd-tap.md @@ -60,7 +60,7 @@ kubectl auth can-i watch deployments.tap.linkerd.io -n emojivoto --as $(whoami) You can also use the Linkerd CLI's `--as` flag to confirm: ```bash -linkerd viz tap -n linkerd deploy/linkerd-controller 
--as $(whoami) +$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) Cannot connect to Linkerd Viz: namespaces is forbidden: User "XXXX" cannot list resource "namespaces" in API group "" at the cluster scope Validate the install with: linkerd viz check ... @@ -77,7 +77,7 @@ To enable tap access to all resources in all namespaces, you may bind your user to the `linkerd-linkerd-tap-admin` ClusterRole, installed by default: ```bash -kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin +$ kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin Name: linkerd-linkerd-viz-tap-admin Labels: component=tap linkerd.io/extension=viz @@ -109,7 +109,7 @@ kubectl create clusterrolebinding \ You can verify you now have tap access with: ```bash -linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) req id=3:0 proxy=in src=10.244.0.1:37392 dst=10.244.0.13:9996 tls=not_provided_by_remote :method=GET :authority=10.244.0.13:9996 :path=/ping ... ``` @@ -143,14 +143,14 @@ Because GCloud provides this additional level of access, there are cases where not. To validate this, check whether your GCloud user has Tap access: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces yes ``` And then validate whether your RBAC user has Tap access: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) no - no RBAC policy matched ``` @@ -187,14 +187,14 @@ privileges necessary to tap resources. 
To confirm: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web yes ``` This access is enabled via a `linkerd-linkerd-viz-web-admin` ClusterRoleBinding: ```bash -kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin +$ kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin Name: linkerd-linkerd-viz-web-admin Labels: component=web linkerd.io/extensions=viz @@ -227,6 +227,6 @@ kubectl delete clusterrolebindings/linkerd-linkerd-viz-web-admin To confirm: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web no ``` diff --git a/linkerd.io/content/2-edge/tasks/troubleshooting.md b/linkerd.io/content/2-edge/tasks/troubleshooting.md index c65e8fb63c..2cd2baef60 100644 --- a/linkerd.io/content/2-edge/tasks/troubleshooting.md +++ b/linkerd.io/content/2-edge/tasks/troubleshooting.md @@ -230,7 +230,7 @@ Example failure: Ensure the Linkerd ClusterRoles exist: ```bash -kubectl get clusterroles | grep linkerd +$ kubectl get clusterroles | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -240,7 +240,7 @@ linkerd-policy 9d Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create clusterroles +$ kubectl auth can-i create clusterroles yes ``` @@ -257,7 +257,7 @@ Example failure: Ensure the Linkerd ClusterRoleBindings exist: ```bash -kubectl get clusterrolebindings | grep linkerd +$ kubectl get clusterrolebindings | grep linkerd linkerd-linkerd-destination 9d linkerd-linkerd-identity 9d linkerd-linkerd-proxy-injector 9d @@ -267,7 +267,7 @@ linkerd-destination-policy 9d Also ensure you have permission to create ClusterRoleBindings: 
```bash -kubectl auth can-i create clusterrolebindings +$ kubectl auth can-i create clusterrolebindings yes ``` @@ -284,7 +284,7 @@ Example failure: Ensure the Linkerd ServiceAccounts exist: ```bash -kubectl -n linkerd get serviceaccounts +$ kubectl -n linkerd get serviceaccounts NAME SECRETS AGE default 1 14m linkerd-destination 1 14m @@ -297,7 +297,7 @@ Also ensure you have permission to create ServiceAccounts in the Linkerd namespace: ```bash -kubectl -n linkerd auth can-i create serviceaccounts +$ kubectl -n linkerd auth can-i create serviceaccounts yes ``` @@ -314,7 +314,7 @@ Example failure: Ensure the Linkerd CRD exists: ```bash -kubectl get customresourcedefinitions +$ kubectl get customresourcedefinitions NAME CREATED AT serviceprofiles.linkerd.io 2019-04-25T21:47:31Z ``` @@ -322,7 +322,7 @@ serviceprofiles.linkerd.io 2019-04-25T21:47:31Z Also ensure you have permission to create CRDs: ```bash -kubectl auth can-i create customresourcedefinitions +$ kubectl auth can-i create customresourcedefinitions yes ``` @@ -339,14 +339,14 @@ Example failure: Ensure the Linkerd MutatingWebhookConfigurations exists: ```bash -kubectl get mutatingwebhookconfigurations | grep linkerd +$ kubectl get mutatingwebhookconfigurations | grep linkerd linkerd-proxy-injector-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create MutatingWebhookConfigurations: ```bash -kubectl auth can-i create mutatingwebhookconfigurations +$ kubectl auth can-i create mutatingwebhookconfigurations yes ``` @@ -363,14 +363,14 @@ Example failure: Ensure the Linkerd ValidatingWebhookConfiguration exists: ```bash -kubectl get validatingwebhookconfigurations | grep linkerd +$ kubectl get validatingwebhookconfigurations | grep linkerd linkerd-sp-validator-webhook-config 2019-07-01T13:13:26Z ``` Also ensure you have permission to create ValidatingWebhookConfigurations: ```bash -kubectl auth can-i create validatingwebhookconfigurations +$ kubectl auth can-i create 
validatingwebhookconfigurations yes ``` @@ -418,7 +418,7 @@ Example failure: Ensure the Linkerd ConfigMap exists: ```bash -kubectl -n linkerd get configmap/linkerd-config +$ kubectl -n linkerd get configmap/linkerd-config NAME DATA AGE linkerd-config 3 61m ``` @@ -426,7 +426,7 @@ linkerd-config 3 61m Also ensure you have permission to create ConfigMaps: ```bash -kubectl -n linkerd auth can-i create configmap +$ kubectl -n linkerd auth can-i create configmap yes ``` @@ -780,7 +780,7 @@ Example failure: Verify the state of the control plane pods with: ```bash -kubectl -n linkerd get po +$ kubectl -n linkerd get po NAME READY STATUS RESTARTS AGE linkerd-destination-5fd7b5d466-szgqm 2/2 Running 1 12m linkerd-identity-54df78c479-hbh5m 2/2 Running 0 12m @@ -862,7 +862,7 @@ Ensure you can connect to the Linkerd version check endpoint from the environment the `linkerd` cli is running: ```bash -curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" +$ curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" {"stable":"stable-2.1.0","edge":"edge-19.1.2"} ``` @@ -1155,7 +1155,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create ConfigMaps: ```bash -kubectl auth can-i create ConfigMaps +$ kubectl auth can-i create ConfigMaps yes ``` @@ -1310,7 +1310,7 @@ Make sure multicluster extension is correctly installed and that the `links.multicluster.linkerd.io` CRD is present. ```bash -kubectl get crds | grep multicluster +$ kubectl get crds | grep multicluster NAME CREATED AT links.multicluster.linkerd.io 2021-03-10T09:58:10Z ``` @@ -1400,7 +1400,7 @@ the rules section. 
Expected rules for `linkerd-service-mirror-access-local-resources` cluster role: ```bash -kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml +$ kubectl --context=local get clusterrole linkerd-service-mirror-access-local-resources -o yaml kind: ClusterRole metadata: labels: @@ -1466,7 +1466,7 @@ everything to start up. If this is a permanent error, you'll want to validate the state of the controller pod with: ```bash -kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror +$ kubectl --all-namespaces get po --selector linkerd.io/control-plane-component=linkerd-service-mirror NAME READY STATUS RESTARTS AGE linkerd-service-mirror-7bb8ff5967-zg265 2/2 Running 0 50m ``` @@ -1612,7 +1612,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoles exist: ```bash -kubectl get clusterroles | grep linkerd-viz +$ kubectl get clusterroles | grep linkerd-viz linkerd-linkerd-viz-metrics-api 2021-01-26T18:02:17Z linkerd-linkerd-viz-prometheus 2021-01-26T18:02:17Z linkerd-linkerd-viz-tap 2021-01-26T18:02:17Z @@ -1640,7 +1640,7 @@ Example failure: Ensure the linkerd-viz extension ClusterRoleBindings exist: ```bash -kubectl get clusterrolebindings | grep linkerd-viz +$ kubectl get clusterrolebindings | grep linkerd-viz linkerd-linkerd-viz-metrics-api ClusterRole/linkerd-linkerd-viz-metrics-api 18h linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 18h linkerd-linkerd-viz-tap ClusterRole/linkerd-linkerd-viz-tap 18h @@ -1652,7 +1652,7 @@ linkerd-linkerd-viz-web-check ClusterRole/linkerd-linke Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create clusterrolebindings +$ kubectl auth can-i create clusterrolebindings yes ``` @@ -1741,7 +1741,7 @@ requirements in the cluster: Ensure all the linkerd-viz pods are injected ```bash -kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE 
grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1765,7 +1765,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-viz pods are running with 2/2 ```bash -kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE grafana-68cddd7cc8-nrv4h 2/2 Running 3 18h metrics-api-77f684f7c7-hnw8r 2/2 Running 2 18h @@ -1789,12 +1789,12 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the prometheus related resources are present and running correctly. ```bash -❯ kubectl -n linkerd-viz get deploy,cm | grep prometheus +$ kubectl -n linkerd-viz get deploy,cm | grep prometheus deployment.apps/prometheus 1/1 1 1 3m18s configmap/prometheus-config 1 3m18s -❯ kubectl get clusterRoleBindings | grep prometheus +$ kubectl get clusterRoleBindings | grep prometheus linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 3m37s -❯ kubectl get clusterRoles | grep prometheus +$ kubectl get clusterRoles | grep prometheus linkerd-linkerd-viz-prometheus 2021-02-26T06:03:11Zh ``` @@ -1810,7 +1810,7 @@ Example failure: Verify that the metrics API pod is running correctly ```bash -❯ kubectl -n linkerd-viz get pods +$ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE metrics-api-7bb8cb8489-cbq4m 2/2 Running 0 4m58s tap-injector-6b9bc6fc4-cgbr4 2/2 Running 0 4m56s @@ -1936,7 +1936,7 @@ Ensure you can connect to the Linkerd Buoyant version check endpoint from the environment the `linkerd` cli is running: ```bash -curl https://buoyant.cloud/version.json +$ curl https://buoyant.cloud/version.json {"linkerd-buoyant":"v0.4.4"} ``` @@ -2001,7 +2001,7 @@ linkerd-buoyant install | kubectl apply -f - Ensure that the cluster role exists: ```bash -kubectl get clusterrole buoyant-cloud-agent +$ kubectl get clusterrole buoyant-cloud-agent NAME CREATED AT buoyant-cloud-agent 2020-11-13T00:59:50Z ``` @@ -2009,7 +2009,7 @@ 
buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create ClusterRoles +$ kubectl auth can-i create clusterroles yes ``` @@ -2024,7 +2024,7 @@ yes Ensure that the cluster role binding exists: ```bash -kubectl get clusterrolebinding buoyant-cloud-agent +$ kubectl get clusterrolebinding buoyant-cloud-agent NAME ROLE AGE buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d ``` @@ -2032,7 +2032,7 @@ buoyant-cloud-agent ClusterRole/buoyant-cloud-agent 301d Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create ClusterRoleBindings +$ kubectl auth can-i create ClusterRoleBindings yes ``` @@ -2047,7 +2047,7 @@ yes Ensure that the service account exists: ```bash -kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent +$ kubectl -n buoyant-cloud get serviceaccount buoyant-cloud-agent NAME SECRETS AGE buoyant-cloud-agent 1 301d ``` @@ -2055,7 +2055,7 @@ buoyant-cloud-agent 1 301d Also ensure you have permission to create ServiceAccounts: ```bash -kubectl -n buoyant-cloud auth can-i create ServiceAccount +$ kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2070,7 +2070,7 @@ yes Ensure that the secret exists: ```bash -kubectl -n buoyant-cloud get secret buoyant-cloud-id +$ kubectl -n buoyant-cloud get secret buoyant-cloud-id NAME TYPE DATA AGE buoyant-cloud-id Opaque 4 301d ``` @@ -2078,7 +2078,7 @@ buoyant-cloud-id Opaque 4 301d Also ensure you have permission to create ServiceAccounts: ```bash -kubectl -n buoyant-cloud auth can-i create ServiceAccount +$ kubectl -n buoyant-cloud auth can-i create ServiceAccount yes ``` @@ -2116,7 +2116,7 @@ everything to start up. 
If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-agent` Deployment with: ```bash -kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent +$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 156m ``` @@ -2139,7 +2139,7 @@ Ensure the `buoyant-cloud-agent` pod is injected, the `READY` column should show `2/2`: ```bash -kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent +$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-agent NAME READY STATUS RESTARTS AGE buoyant-cloud-agent-6b8c6888d7-htr7d 2/2 Running 0 161m ``` @@ -2158,7 +2158,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -linkerd-buoyant version +$ linkerd-buoyant version CLI version: v0.4.4 Agent version: v0.4.4 ``` @@ -2166,7 +2166,7 @@ Agent version: v0.4.4 To update to the latest version: ```bash -linkerd-buoyant install | kubectl apply -f - +$ linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running a single pod @@ -2217,7 +2217,7 @@ everything to start up. 
If this is a permanent error, you'll want to validate the state of the `buoyant-cloud-metrics` DaemonSet with: ```bash -kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get po --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 163m buoyant-cloud-metrics-q8jhj 2/2 Running 0 163m @@ -2243,7 +2243,7 @@ Ensure the `buoyant-cloud-metrics` pods are injected, the `READY` column should show `2/2`: ```bash -kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 166m buoyant-cloud-metrics-q8jhj 2/2 Running 0 166m @@ -2265,7 +2265,7 @@ Make sure that the `proxy-injector` is working correctly by running Check the version with: ```bash -kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' +$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics -o jsonpath='{.metadata.labels}' {"app.kubernetes.io/name":"metrics","app.kubernetes.io/part-of":"buoyant-cloud","app.kubernetes.io/version":"v0.4.4"} ``` diff --git a/linkerd.io/content/2.10/tasks/troubleshooting.md b/linkerd.io/content/2.10/tasks/troubleshooting.md index 7513085c82..ee27242c04 100644 --- a/linkerd.io/content/2.10/tasks/troubleshooting.md +++ b/linkerd.io/content/2.10/tasks/troubleshooting.md @@ -26,7 +26,7 @@ installation, that namespace should not exist. 
To check with a different namespace, run: ```bash -$ linkerd check --pre --linkerd-namespace linkerd-test +linkerd check --pre --linkerd-namespace linkerd-test ``` ### √ can create Kubernetes resources {#pre-k8s-cluster-k8s} @@ -266,16 +266,16 @@ and also in the context of a multi-stage setup, for example: ```bash # install cluster-wide resources (first stage) -$ linkerd install config | kubectl apply -f - +linkerd install config | kubectl apply -f - # validate successful cluster-wide resources installation -$ linkerd check config +linkerd check config # install Linkerd control plane -$ linkerd install control-plane | kubectl apply -f - +linkerd install control-plane | kubectl apply -f - # validate successful control-plane installation -$ linkerd check +linkerd check ``` ### √ control plane Namespace exists {#l5d-existence-ns} @@ -855,11 +855,11 @@ This check indicates a connectivity failure between the cli and the Linkerd control plane. To verify connectivity, manually connect to a control plane pod: ```bash -$ kubectl -n linkerd port-forward \ +kubectl -n linkerd port-forward \ $(kubectl -n linkerd get po \ --selector=linkerd.io/control-plane-component=identity \ -o jsonpath='{.items[*].metadata.name}') \ - 9995:9995 +9995:9995 ``` ...and then curl the `/metrics` endpoint: @@ -883,7 +883,7 @@ Ensure you can connect to the Linkerd version check endpoint from the environment the `linkerd` cli is running: ```bash -curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" +$ curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" {"stable":"stable-2.1.0","edge":"edge-19.1.2"} ``` @@ -922,7 +922,7 @@ normally. Example failure: ```bash -linkerd check --proxy --namespace foo +$ linkerd check --proxy --namespace foo ... 
× data plane namespace exists
The "foo" namespace does not exist
@@ -960,7 +960,7 @@ Ensure Prometheus can connect to each `linkerd-proxy` via the Prometheus
dashboard:

```bash
-$ kubectl -n linkerd port-forward svc/linkerd-prometheus 9090
+kubectl -n linkerd port-forward svc/linkerd-prometheus 9090
```

...and then browse to
@@ -1324,7 +1324,7 @@ Example failure:
Ensure that all the CNI pods are running:

```bash
-$ kubectl get po -n linkerd-cni
+$ kubectl get po -n linkerd-cni
NAME READY STATUS RESTARTS AGE
linkerd-cni-rzp2q 1/1 Running 0 9m20s
linkerd-cni-mf564 1/1 Running 0 9m22s
@@ -1617,7 +1617,7 @@ linkerd-linkerd-viz-web-check 2021-01-2
Also ensure you have permission to create ClusterRoles:

```bash
-kubectl auth can-i create clusterroles
+$ kubectl auth can-i create clusterroles
yes
```

@@ -1766,12 +1766,12 @@ Make sure that the `proxy-injector` is working correctly by running
Ensure all the prometheus related resources are present and running correctly.

```bash
-$ kubectl -n linkerd-viz get deploy,cm | grep prometheus
+$ kubectl -n linkerd-viz get deploy,cm | grep prometheus
deployment.apps/prometheus 1/1 1 1 3m18s
configmap/prometheus-config 1 3m18s
-$ kubectl get clusterRoleBindings | grep prometheus
+$ kubectl get clusterRoleBindings | grep prometheus
linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 3m37s
-$ kubectl get clusterRoles | grep prometheus
+$ kubectl get clusterRoles | grep prometheus
linkerd-linkerd-viz-prometheus 2021-02-26T06:03:11Zh
```

@@ -1787,7 +1787,7 @@ Example failure:
Verify that the metrics API pod is running correctly

```bash
-$ kubectl -n linkerd-viz get pods
+$ kubectl -n linkerd-viz get pods
NAME READY STATUS RESTARTS AGE
metrics-api-7bb8cb8489-cbq4m 2/2 Running 0 4m58s
tap-injector-6b9bc6fc4-cgbr4 2/2 Running 0 4m56s
@@ -1967,7 +1967,7 @@ Ensure you can connect to the Linkerd Buoyant version check endpoint from the
environment the `linkerd` cli is running:

```bash
-curl --proto '=https' --tlsv1.2 -sSfL
https://buoyant.cloud/version.json +$ curl --proto '=https' --tlsv1.2 -sSfL https://buoyant.cloud/version.json {"linkerd-buoyant":"v0.4.4"} ``` @@ -2040,7 +2040,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +$ kubectl auth can-i create ClusterRoles yes ``` @@ -2124,14 +2124,14 @@ yes Ensure the `buoyant-cloud-agent` Deployment exists: ```bash -$ kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent +kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent ``` If the Deployment does not exist, the `linkerd-buoyant` installation may be missing or incomplete. To reinstall the extension: ```bash -$ linkerd-buoyant install | kubectl apply -f - +linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running diff --git a/linkerd.io/content/2.11/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.11/tasks/multicluster-using-statefulsets.md index c720c09563..009dfe24d8 100644 --- a/linkerd.io/content/2.11/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.11/tasks/multicluster-using-statefulsets.md @@ -60,10 +60,10 @@ everything. ```sh # create k3d clusters -./create.sh +$ ./create.sh # list the clusters -k3d cluster list +$ k3d cluster list NAME SERVERS AGENTS LOADBALANCER east 1/1 0/0 true west 1/1 0/0 true @@ -71,17 +71,16 @@ west 1/1 0/0 true Once our clusters are created, we will install Linkerd and the multi-cluster extension. Finally, once both are installed, we need to link the two clusters -together so their services may be mirrored. To enable support for headless -services, we will pass an additional `--set "enableHeadlessServices=true"` flag -to `linkerd multicluster link`. As before, these steps are automated through the -provided scripts, but feel free to have a look! +together so their services may be mirrored. 
As before, these steps are automated +through the provided scripts; please give them a look and see how the +controllers and links are generated for both clusters. ```sh # Install Linkerd and multicluster, output to check should be a success -./install.sh +$ ./install.sh # Next, link the two clusters together -./link.sh +$ ./link.sh ``` Perfect! If you've made it this far with no errors, then it's a good sign. In @@ -101,17 +100,17 @@ communication. First, we will deploy our pods and services: ```sh # deploy services and mesh namespaces -./deploy.sh +$ ./deploy.sh # verify both clusters # # verify east -kubectl --context=k3d-east get pods +$ kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 7s # verify west has headless service -kubectl --context=k3d-west get services +$ kubectl --context=k3d-west get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 10m nginx-svc ClusterIP None 80/TCP 8s @@ -119,7 +118,7 @@ nginx-svc ClusterIP None 80/TCP 8s # verify west has statefulset # # this may take a while to come up -kubectl --context=k3d-west get pods +$ kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 53s nginx-set-1 2/2 Running 0 43s @@ -130,7 +129,7 @@ Before we go further, let's have a look at the endpoints object for the `nginx-svc`: ```sh -kubectl --context=k3d-west get endpoints nginx-svc -o yaml +$ kubectl --context=k3d-west get endpoints nginx-svc -o yaml ... subsets: - addresses: @@ -170,15 +169,15 @@ would get an answer back. 
We can test this out by applying the curl pod to the `west` cluster: ```sh -kubectl --context=k3d-west apply -f east/curl.yml -kubectl --context=k3d-west get pods +$ kubectl --context=k3d-west apply -f east/curl.yml +$ kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 5m8s nginx-set-1 2/2 Running 0 4m58s nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s -kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh +$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh /# prompt for curl pod ``` @@ -186,7 +185,7 @@ If we now curl one of these instances, we will get back a response. ```sh # exec'd on the pod -/ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local +$ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local " @@ -218,7 +217,7 @@ Now, let's do the same, but this time from the `east` cluster. We will first export the service. ```sh -kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" +$ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled kubectl --context=k3d-east get services @@ -235,7 +234,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname will point to one of the services we see above: ```sh -kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml +$ kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml subsets: - addresses: - hostname: nginx-set-0 @@ -251,17 +250,17 @@ cluster (`west`), will be mirrored as a clusterIP service. We will see in a second why this matters. 
```sh
-kubectl --context=k3d-east get pods
+$ kubectl --context=k3d-east get pods
NAME READY STATUS RESTARTS AGE
curl-56dc7d945d-96r6p 2/2 Running 0 23m

# exec and curl
-kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh
+$ kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh

# we want to curl the same hostname we see in the endpoints object above.
# however, the service and cluster domain will now be different, since we
# are in a different cluster.
#
-/ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local
+$ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local
@@ -329,8 +328,8 @@ validation.
To clean-up, you can remove both clusters entirely using the k3d CLI:

```sh
-k3d cluster delete east
+$ k3d cluster delete east
cluster east deleted
-k3d cluster delete west
+$ k3d cluster delete west
cluster west deleted
```
diff --git a/linkerd.io/content/2.11/tasks/troubleshooting.md b/linkerd.io/content/2.11/tasks/troubleshooting.md
index 7de77a6be6..3a988974a6 100644
--- a/linkerd.io/content/2.11/tasks/troubleshooting.md
+++ b/linkerd.io/content/2.11/tasks/troubleshooting.md
@@ -990,7 +990,7 @@ Ensure you can connect to the Linkerd version check endpoint from the
environment the `linkerd` cli is running:

```bash
-curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli"
+$ curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli"
{"stable":"stable-2.1.0","edge":"edge-19.1.2"}
```

@@ -1049,7 +1049,7 @@ normally.
Example failure:

```bash
-linkerd check --proxy --namespace foo
+$ linkerd check --proxy --namespace foo
...
× data plane namespace exists
The "foo" namespace does not exist
@@ -1198,7 +1198,7 @@ Ensure the kube-system namespace has the
`config.linkerd.io/admission-webhooks:disabled` label:

```bash
-kubectl get namespace kube-system -oyaml
+$ kubectl get namespace kube-system -oyaml
kind: Namespace
apiVersion: v1
metadata:
@@ -1471,7 +1471,7 @@ Example failure:
Ensure that all the CNI pods are running:

```bash
-$ kubectl get po -n linkerd-cni
+$ kubectl get po -n linkerd-cni
NAME READY STATUS RESTARTS AGE
linkerd-cni-rzp2q 1/1 Running 0 9m20s
linkerd-cni-mf564 1/1 Running 0 9m22s
@@ -1621,7 +1621,7 @@ rules:
Expected rules for `linkerd-service-mirror-read-remote-creds` role:

```bash
-kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml
+$ kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml
kind: Role
metadata:
labels:
@@ -1753,7 +1753,7 @@ Example failure:
Ensure the linkerd-viz extension ClusterRoles exist:

```bash
-kubectl get clusterroles | grep linkerd-viz
+$ kubectl get clusterroles | grep linkerd-viz
linkerd-linkerd-viz-metrics-api 2021-01-26T18:02:17Z
linkerd-linkerd-viz-prometheus 2021-01-26T18:02:17Z
linkerd-linkerd-viz-tap 2021-01-26T18:02:17Z
@@ -1764,7 +1764,7 @@ linkerd-linkerd-viz-web-check 2021-01-2
Also ensure you have permission to create ClusterRoles:

```bash
-kubectl auth can-i create clusterroles
+$ kubectl auth can-i create clusterroles
yes
```

@@ -1913,12 +1913,12 @@ Make sure that the `proxy-injector` is working correctly by running
Ensure all the prometheus related resources are present and running correctly.
```bash
-$ kubectl -n linkerd-viz get deploy,cm | grep prometheus
+$ kubectl -n linkerd-viz get deploy,cm | grep prometheus
deployment.apps/prometheus 1/1 1 1 3m18s
configmap/prometheus-config 1 3m18s
-$ kubectl get clusterRoleBindings | grep prometheus
+$ kubectl get clusterRoleBindings | grep prometheus
linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 3m37s
-$ kubectl get clusterRoles | grep prometheus
+$ kubectl get clusterRoles | grep prometheus
linkerd-linkerd-viz-prometheus 2021-02-26T06:03:11Zh
```

@@ -1934,7 +1934,7 @@ Example failure:
Verify that the metrics API pod is running correctly

```bash
-$ kubectl -n linkerd-viz get pods
+$ kubectl -n linkerd-viz get pods
NAME READY STATUS RESTARTS AGE
metrics-api-7bb8cb8489-cbq4m 2/2 Running 0 4m58s
tap-injector-6b9bc6fc4-cgbr4 2/2 Running 0 4m56s
@@ -1994,7 +1994,7 @@ Example failure:
Ensure the linkerd-jaeger ServiceAccounts exist:

```bash
-kubectl -n linkerd-jaeger get serviceaccounts
+$ kubectl -n linkerd-jaeger get serviceaccounts
NAME SECRETS AGE
collector 1 23m
jaeger 1 23m
@@ -2004,7 +2004,7 @@ Also ensure you have permission to create ServiceAccounts in the linkerd-jaeger
namespace:

```bash
-kubectl -n linkerd-jaeger auth can-i create serviceaccounts
+$ kubectl -n linkerd-jaeger auth can-i create serviceaccounts
yes
```

@@ -2114,7 +2114,7 @@ Ensure you can connect to the Linkerd Buoyant version check endpoint from the
environment the `linkerd` cli is running:

```bash
-curl https://buoyant.cloud/version.json
+$ curl https://buoyant.cloud/version.json
{"linkerd-buoyant":"v0.4.4"}
```

@@ -2187,7 +2187,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z
Also ensure you have permission to create ClusterRoles:

```bash
-$ kubectl auth can-i create clusterroles
+$ kubectl auth can-i create ClusterRoles
yes
```

@@ -2271,14 +2271,14 @@ yes
Ensure the `buoyant-cloud-agent` Deployment exists:

```bash
-$ kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent
+kubectl -n buoyant-cloud get
deploy/buoyant-cloud-agent ``` If the Deployment does not exist, the `linkerd-buoyant` installation may be missing or incomplete. To reinstall the extension: ```bash -$ linkerd-buoyant install | kubectl apply -f - +linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running @@ -2421,7 +2421,7 @@ Ensure the `buoyant-cloud-metrics` pods are injected, the `READY` column should show `2/2`: ```bash -kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 166m buoyant-cloud-metrics-q8jhj 2/2 Running 0 166m diff --git a/linkerd.io/content/2.12/tasks/configuring-per-route-policy.md b/linkerd.io/content/2.12/tasks/configuring-per-route-policy.md index fc1f8477be..8cada41de0 100644 --- a/linkerd.io/content/2.12/tasks/configuring-per-route-policy.md +++ b/linkerd.io/content/2.12/tasks/configuring-per-route-policy.md @@ -87,7 +87,7 @@ First, let's run the `linkerd viz authz` command to list the authorization resources that currently exist for the `authors` deployment: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated default/all-unauthenticated 0.0rps 70.31% 8.1rps 1ms 43ms 49ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.3rps 1ms 1ms 1ms @@ -124,7 +124,7 @@ Now that we've defined a [`Server`] for the authors `Deployment`, we can run the currently unauthorized: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default authors-server 9.5rps 0.00% 0.0rps 0ms 0ms 0ms probe authors-server default/probe 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -291,7 +291,7 @@ 
network (0.0.0.0). Running `linkerd viz authz` again, we can now see that our new policies exist: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 2ms 2ms 2ms authors-probe-route authors-server authorizationpolicy/authors-probe-policy 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -362,7 +362,7 @@ requests, but we haven't _authorized_ requests to that route. Running the requests to `authors-modify-route`: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy - - - - - - authors-modify-route authors-server 9.7rps 0.00% 0.0rps 0ms 0ms 0ms @@ -421,7 +421,7 @@ Running the `linkerd viz authz` command one last time, we now see that all traffic is authorized: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 0ms 0ms 0ms authors-modify-route authors-server authorizationpolicy/authors-modify-policy 0.0rps 100.00% 0.0rps 0ms 0ms 0ms diff --git a/linkerd.io/content/2.12/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.12/tasks/multicluster-using-statefulsets.md index c720c09563..b4b6920aad 100644 --- a/linkerd.io/content/2.12/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.12/tasks/multicluster-using-statefulsets.md @@ -60,10 +60,10 @@ everything. 
```sh # create k3d clusters -./create.sh +$ ./create.sh # list the clusters -k3d cluster list +$ k3d cluster list NAME SERVERS AGENTS LOADBALANCER east 1/1 0/0 true west 1/1 0/0 true @@ -78,10 +78,10 @@ provided scripts, but feel free to have a look! ```sh # Install Linkerd and multicluster, output to check should be a success -./install.sh +$ ./install.sh # Next, link the two clusters together -./link.sh +$ ./link.sh ``` Perfect! If you've made it this far with no errors, then it's a good sign. In @@ -101,17 +101,17 @@ communication. First, we will deploy our pods and services: ```sh # deploy services and mesh namespaces -./deploy.sh +$ ./deploy.sh # verify both clusters # # verify east -kubectl --context=k3d-east get pods +$ kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 7s # verify west has headless service -kubectl --context=k3d-west get services +$ kubectl --context=k3d-west get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 10m nginx-svc ClusterIP None 80/TCP 8s @@ -119,7 +119,7 @@ nginx-svc ClusterIP None 80/TCP 8s # verify west has statefulset # # this may take a while to come up -kubectl --context=k3d-west get pods +$ kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 53s nginx-set-1 2/2 Running 0 43s @@ -130,7 +130,7 @@ Before we go further, let's have a look at the endpoints object for the `nginx-svc`: ```sh -kubectl --context=k3d-west get endpoints nginx-svc -o yaml +$ kubectl --context=k3d-west get endpoints nginx-svc -o yaml ... subsets: - addresses: @@ -170,15 +170,15 @@ would get an answer back. 
We can test this out by applying the curl pod to the `west` cluster: ```sh -kubectl --context=k3d-west apply -f east/curl.yml -kubectl --context=k3d-west get pods +$ kubectl --context=k3d-west apply -f east/curl.yml +$ kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 5m8s nginx-set-1 2/2 Running 0 4m58s nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s -kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh +$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh /# prompt for curl pod ``` @@ -186,7 +186,7 @@ If we now curl one of these instances, we will get back a response. ```sh # exec'd on the pod -/ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local +$ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local " @@ -218,10 +218,10 @@ Now, let's do the same, but this time from the `east` cluster. We will first export the service. ```sh -kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" +$ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled -kubectl --context=k3d-east get services +$ kubectl --context=k3d-east get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 20h nginx-svc-west ClusterIP None 80/TCP 29s @@ -235,7 +235,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname will point to one of the services we see above: ```sh -kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml +$ kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml subsets: - addresses: - hostname: nginx-set-0 @@ -251,17 +251,17 @@ cluster (`west`), will be mirrored as a clusterIP service. We will see in a second why this matters. 
```sh -kubectl --context=k3d-east get pods +$ kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh +$ kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. # -/ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local +$ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local @@ -329,8 +329,8 @@ validation. To clean-up, you can remove both clusters entirely using the k3d CLI: ```sh -k3d cluster delete east +$ k3d cluster delete east cluster east deleted -k3d cluster delete west +$ k3d cluster delete west cluster west deleted ``` diff --git a/linkerd.io/content/2.12/tasks/securing-linkerd-tap.md b/linkerd.io/content/2.12/tasks/securing-linkerd-tap.md index f66601f5dd..d3023ec39f 100644 --- a/linkerd.io/content/2.12/tasks/securing-linkerd-tap.md +++ b/linkerd.io/content/2.12/tasks/securing-linkerd-tap.md @@ -57,7 +57,7 @@ kubectl auth can-i watch deployments.tap.linkerd.io -n emojivoto --as $(whoami) You can also use the Linkerd CLI's `--as` flag to confirm: ```bash -linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) Cannot connect to Linkerd Viz: namespaces is forbidden: User "XXXX" cannot list resource "namespaces" in API group "" at the cluster scope Validate the install with: linkerd viz check ... 
@@ -74,7 +74,7 @@ To enable tap access to all resources in all namespaces, you may bind your user to the `linkerd-linkerd-tap-admin` ClusterRole, installed by default: ```bash -kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin +$ kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin Name: linkerd-linkerd-viz-tap-admin Labels: component=tap linkerd.io/extension=viz @@ -106,7 +106,7 @@ kubectl create clusterrolebinding \ You can verify you now have tap access with: ```bash -linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) req id=3:0 proxy=in src=10.244.0.1:37392 dst=10.244.0.13:9996 tls=not_provided_by_remote :method=GET :authority=10.244.0.13:9996 :path=/ping ... ``` @@ -140,14 +140,14 @@ Because GCloud provides this additional level of access, there are cases where not. To validate this, check whether your GCloud user has Tap access: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces yes ``` And then validate whether your RBAC user has Tap access: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) no - no RBAC policy matched ``` @@ -184,14 +184,14 @@ privileges necessary to tap resources. 
To confirm: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web yes ``` This access is enabled via a `linkerd-linkerd-viz-web-admin` ClusterRoleBinding: ```bash -kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin +$ kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin Name: linkerd-linkerd-viz-web-admin Labels: component=web linkerd.io/extensions=viz @@ -224,6 +224,6 @@ kubectl delete clusterrolebindings/linkerd-linkerd-viz-web-admin To confirm: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web no ``` diff --git a/linkerd.io/content/2.12/tasks/troubleshooting.md b/linkerd.io/content/2.12/tasks/troubleshooting.md index ac0151b085..7ec6896a2d 100644 --- a/linkerd.io/content/2.12/tasks/troubleshooting.md +++ b/linkerd.io/content/2.12/tasks/troubleshooting.md @@ -862,7 +862,7 @@ Ensure you can connect to the Linkerd version check endpoint from the environment the `linkerd` cli is running: ```bash -curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" +$ curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" {"stable":"stable-2.1.0","edge":"edge-19.1.2"} ``` @@ -921,7 +921,7 @@ normally. Example failure: ```bash -linkerd check --proxy --namespace foo +$ linkerd check --proxy --namespace foo ... 
× data plane namespace exists The "foo" namespace does not exist @@ -1118,7 +1118,7 @@ Example error: Ensure that the linkerd-cni-config ConfigMap exists in the CNI namespace: ```bash -kubectl get cm linkerd-cni-config -n linkerd-cni +$ kubectl get cm linkerd-cni-config -n linkerd-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1126,7 +1126,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create ConfigMaps: ```bash -kubectl auth can-i create ConfigMaps +$ kubectl auth can-i create ConfigMaps yes ``` @@ -1143,7 +1143,7 @@ Example error: Ensure that the cluster role exists: ```bash -kubectl get clusterrole linkerd-cni +$ kubectl get clusterrole linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1151,7 +1151,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create ClusterRoles +$ kubectl auth can-i create ClusterRoles yes ``` @@ -1168,7 +1168,7 @@ Example error: Ensure that the cluster role binding exists: ```bash -kubectl get clusterrolebinding linkerd-cni +$ kubectl get clusterrolebinding linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1176,7 +1176,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create ClusterRoleBindings +$ kubectl auth can-i create ClusterRoleBindings yes ``` @@ -1193,7 +1193,7 @@ Example error: Ensure that the CNI service account exists in the CNI namespace: ```bash -kubectl get ServiceAccount linkerd-cni -n linkerd-cni +$ kubectl get ServiceAccount linkerd-cni -n linkerd-cni NAME SECRETS AGE linkerd-cni 1 45m ``` @@ -1201,7 +1201,7 @@ linkerd-cni 1 45m Also ensure you have permission to create ServiceAccount: ```bash -kubectl auth can-i create ServiceAccounts -n linkerd-cni +$ kubectl auth can-i create ServiceAccounts -n linkerd-cni yes ``` @@ -1218,7 +1218,7 @@ 
Example error:
Ensure that the CNI daemonset exists in the CNI namespace:

```bash
-kubectl get ds -n linkerd-cni
+$ kubectl get ds -n linkerd-cni
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
linkerd-cni 1 1 1 1 1 beta.kubernetes.io/os=linux 14m
```

@@ -1226,7 +1226,7 @@ linkerd-cni 1 1 1 1 1 beta.kubernet
Also ensure you have permission to create DaemonSets:

```bash
-kubectl auth can-i create DaemonSets -n linkerd-cni
+$ kubectl auth can-i create DaemonSets -n linkerd-cni
yes
```

@@ -1243,7 +1243,7 @@ Example failure:
Ensure that all the CNI pods are running:

```bash
-kubectl get po -n linkerd-cni
+$ kubectl get po -n linkerd-cni
NAME READY STATUS RESTARTS AGE
linkerd-cni-rzp2q 1/1 Running 0 9m20s
linkerd-cni-mf564 1/1 Running 0 9m22s
@@ -1253,7 +1253,7 @@ linkerd-cni-p5670 1/1 Running 0 9m25s
Ensure that all pods have finished the deployment of the CNI config and binary:

```bash
-kubectl logs linkerd-cni-rzp2q -n linkerd-cni
+$ kubectl logs linkerd-cni-rzp2q -n linkerd-cni
Wrote linkerd CNI binaries to /host/opt/cni/bin
Created CNI config /host/etc/cni/net.d/10-kindnet.conflist
Done configuring CNI. Sleep=true
@@ -1555,7 +1555,7 @@ linkerd-linkerd-viz-web-check 2021-01-2
Also ensure you have permission to create ClusterRoles:

```bash
-kubectl auth can-i create clusterroles
+$ kubectl auth can-i create clusterroles
yes
```

@@ -1721,12 +1721,12 @@ Make sure that the `proxy-injector` is working correctly by running
Ensure all the prometheus related resources are present and running correctly.
```bash
-$ kubectl -n linkerd-viz get deploy,cm | grep prometheus
+$ kubectl -n linkerd-viz get deploy,cm | grep prometheus
deployment.apps/prometheus 1/1 1 1 3m18s
configmap/prometheus-config 1 3m18s
-$ kubectl get clusterRoleBindings | grep prometheus
+$ kubectl get clusterRoleBindings | grep prometheus
linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 3m37s
-$ kubectl get clusterRoles | grep prometheus
+$ kubectl get clusterRoles | grep prometheus
linkerd-linkerd-viz-prometheus 2021-02-26T06:03:11Zh
```

@@ -1742,7 +1742,7 @@ Example failure:
Verify that the metrics API pod is running correctly

```bash
-$ kubectl -n linkerd-viz get pods
+$ kubectl -n linkerd-viz get pods
NAME READY STATUS RESTARTS AGE
metrics-api-7bb8cb8489-cbq4m 2/2 Running 0 4m58s
tap-injector-6b9bc6fc4-cgbr4 2/2 Running 0 4m56s
@@ -2023,7 +2023,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z
Also ensure you have permission to create ClusterRoles:

```bash
-$ kubectl auth can-i create clusterroles
+$ kubectl auth can-i create ClusterRoles
yes
```

@@ -2107,14 +2107,14 @@ yes
Ensure the `buoyant-cloud-agent` Deployment exists:

```bash
-$ kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent
+kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent
```

If the Deployment does not exist, the `linkerd-buoyant` installation may be
missing or incomplete. To reinstall the extension:

```bash
-$ linkerd-buoyant install | kubectl apply -f -
+linkerd-buoyant install | kubectl apply -f -
```

### √ buoyant-cloud-agent Deployment is running

@@ -2180,7 +2180,7 @@ Agent version: v0.4.4
To update to the latest version:

```bash
-$ linkerd-buoyant install | kubectl apply -f -
+linkerd-buoyant install | kubectl apply -f -
```

### √ buoyant-cloud-agent Deployment is running a single pod

@@ -2194,7 +2194,7 @@ $ linkerd-buoyant install | kubectl apply -f -
`buoyant-cloud-agent` should run as a singleton.
Check for other pods: ```bash -$ kubectl get po -A --selector app=buoyant-cloud-agent +kubectl get po -A --selector app=buoyant-cloud-agent ``` ### √ buoyant-cloud-metrics DaemonSet exists @@ -2208,14 +2208,14 @@ $ kubectl get po -A --selector app=buoyant-cloud-agent Ensure the `buoyant-cloud-metrics` DaemonSet exists: ```bash -$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics +kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics ``` If the DaemonSet does not exist, the `linkerd-buoyant` installation may be missing or incomplete. To reinstall the extension: ```bash -$ linkerd-buoyant install | kubectl apply -f - +linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-metrics DaemonSet is running diff --git a/linkerd.io/content/2.13/tasks/configuring-dynamic-request-routing.md b/linkerd.io/content/2.13/tasks/configuring-dynamic-request-routing.md index af753bbcf7..8dea27c84f 100644 --- a/linkerd.io/content/2.13/tasks/configuring-dynamic-request-routing.md +++ b/linkerd.io/content/2.13/tasks/configuring-dynamic-request-routing.md @@ -67,7 +67,7 @@ Requests to `/echo` on port 9898 to the frontend pod will get forwarded the pod pointed by the Service `backend-a-podinfo`: ```bash -curl -sX POST localhost:9898/echo \ +$ curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' PODINFO_UI_MESSAGE=A backend @@ -142,17 +142,17 @@ to the `backend-a-podinfo` Service. The previous requests should still reach `backend-a-podinfo` only: ```bash -curl -sX POST localhost:9898/echo \ +$ curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' PODINFO_UI_MESSAGE=A backend ``` -But if we add the "`x-request-id: alternative`" header they get routed to +But if we add the `x-request-id: alternative` header, they get routed to `backend-b-podinfo`: ```bash -curl -sX POST \ +$ curl -sX POST \ -H 'x-request-id: alternative' \ localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. 
backend' diff --git a/linkerd.io/content/2.13/tasks/configuring-per-route-policy.md b/linkerd.io/content/2.13/tasks/configuring-per-route-policy.md index 30e18a67c1..c5cefa6861 100644 --- a/linkerd.io/content/2.13/tasks/configuring-per-route-policy.md +++ b/linkerd.io/content/2.13/tasks/configuring-per-route-policy.md @@ -87,7 +87,7 @@ First, let's run the `linkerd viz authz` command to list the authorization resources that currently exist for the `authors` deployment: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated default/all-unauthenticated 0.0rps 70.31% 8.1rps 1ms 43ms 49ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.3rps 1ms 1ms 1ms @@ -124,7 +124,7 @@ Now that we've defined a [`Server`] for the authors `Deployment`, we can run the currently unauthorized: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default authors-server 9.5rps 0.00% 0.0rps 0ms 0ms 0ms probe authors-server default/probe 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -291,7 +291,7 @@ network (0.0.0.0). Running `linkerd viz authz` again, we can now see that our new policies exist: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 2ms 2ms 2ms authors-probe-route authors-server authorizationpolicy/authors-probe-policy 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -362,7 +362,7 @@ requests, but we haven't _authorized_ requests to that route. 
Running the requests to `authors-modify-route`: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy - - - - - - authors-modify-route authors-server 9.7rps 0.00% 0.0rps 0ms 0ms 0ms @@ -421,7 +421,7 @@ Running the `linkerd viz authz` command one last time, we now see that all traffic is authorized: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 0ms 0ms 0ms authors-modify-route authors-server authorizationpolicy/authors-modify-policy 0.0rps 100.00% 0.0rps 0ms 0ms 0ms diff --git a/linkerd.io/content/2.13/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.13/tasks/multicluster-using-statefulsets.md index 1e4bee4013..8a37486fea 100644 --- a/linkerd.io/content/2.13/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.13/tasks/multicluster-using-statefulsets.md @@ -178,7 +178,7 @@ nginx-set-1 2/2 Running 0 4m58s nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s -$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh +$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh /# prompt for curl pod ``` @@ -186,7 +186,7 @@ If we now curl one of these instances, we will get back a response. ```sh # exec'd on the pod -/ $ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local +$ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local " @@ -221,7 +221,7 @@ export the service. 
$ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true"
service/nginx-svc labeled

-$ kubectl --context=k3d-east get services
+$ kubectl --context=k3d-east get services
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.43.0.1 443/TCP 20h
nginx-svc-west ClusterIP None 80/TCP 29s
@@ -235,7 +235,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname
will point to one of the services we see above:

```sh
-$ kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml
+$ kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml
subsets:
- addresses:
  - hostname: nginx-set-0
@@ -261,7 +261,7 @@ $ kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/s
# however, the service and cluster domain will now be different, since we
# are in a different cluster.
#
-/ $ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local
+$ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local


diff --git a/linkerd.io/content/2.13/tasks/troubleshooting.md b/linkerd.io/content/2.13/tasks/troubleshooting.md
index ffe22e5153..a635870d2f 100644
--- a/linkerd.io/content/2.13/tasks/troubleshooting.md
+++ b/linkerd.io/content/2.13/tasks/troubleshooting.md
@@ -862,7 +862,7 @@ Ensure you can connect to the Linkerd version check endpoint from the
environment the `linkerd` cli is running:

```bash
-curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli"
+$ curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli"
{"stable":"stable-2.1.0","edge":"edge-19.1.2"}
```

diff --git a/linkerd.io/content/2.14/tasks/configuring-dynamic-request-routing.md b/linkerd.io/content/2.14/tasks/configuring-dynamic-request-routing.md
index e554bc6ac4..c38f2a438c 100644
--- a/linkerd.io/content/2.14/tasks/configuring-dynamic-request-routing.md
+++ b/linkerd.io/content/2.14/tasks/configuring-dynamic-request-routing.md
@@
-67,7 +67,7 @@ Requests to `/echo` on port 9898 to the frontend pod will get forwarded the pod pointed by the Service `backend-a-podinfo`: ```bash -curl -sX POST localhost:9898/echo \ +$ curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' PODINFO_UI_MESSAGE=A backend @@ -161,7 +161,7 @@ to the `backend-a-podinfo` Service. The previous requests should still reach `backend-a-podinfo` only: ```bash -curl -sX POST localhost:9898/echo \ +$ curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' PODINFO_UI_MESSAGE=A backend @@ -171,7 +171,7 @@ But if we add the "`x-request-id: alternative`" header they get routed to `backend-b-podinfo`: ```bash -curl -sX POST \ +$ curl -sX POST \ -H 'x-request-id: alternative' \ localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' diff --git a/linkerd.io/content/2.14/tasks/configuring-per-route-policy.md b/linkerd.io/content/2.14/tasks/configuring-per-route-policy.md index 63b79fc6d4..ea27641a9c 100644 --- a/linkerd.io/content/2.14/tasks/configuring-per-route-policy.md +++ b/linkerd.io/content/2.14/tasks/configuring-per-route-policy.md @@ -87,7 +87,7 @@ First, let's run the `linkerd viz authz` command to list the authorization resources that currently exist for the `authors` deployment: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated default/all-unauthenticated 0.0rps 70.31% 8.1rps 1ms 43ms 49ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.3rps 1ms 1ms 1ms @@ -124,7 +124,7 @@ Now that we've defined a [`Server`] for the authors `Deployment`, we can run the currently unauthorized: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default authors-server 
9.5rps 0.00% 0.0rps 0ms 0ms 0ms probe authors-server default/probe 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -312,7 +312,7 @@ network (0.0.0.0). Running `linkerd viz authz` again, we can now see that our new policies exist: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 2ms 2ms 2ms authors-probe-route authors-server authorizationpolicy/authors-probe-policy 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -383,7 +383,7 @@ requests, but we haven't _authorized_ requests to that route. Running the requests to `authors-modify-route`: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy - - - - - - authors-modify-route authors-server 9.7rps 0.00% 0.0rps 0ms 0ms 0ms @@ -442,7 +442,7 @@ Running the `linkerd viz authz` command one last time, we now see that all traffic is authorized: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 0ms 0ms 0ms authors-modify-route authors-server authorizationpolicy/authors-modify-policy 0.0rps 100.00% 0.0rps 0ms 0ms 0ms diff --git a/linkerd.io/content/2.14/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.14/tasks/multicluster-using-statefulsets.md index c720c09563..7b4ad479c5 100644 --- a/linkerd.io/content/2.14/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.14/tasks/multicluster-using-statefulsets.md @@ -60,10 +60,10 @@ everything. 
```sh # create k3d clusters -./create.sh +$ ./create.sh # list the clusters -k3d cluster list +$ k3d cluster list NAME SERVERS AGENTS LOADBALANCER east 1/1 0/0 true west 1/1 0/0 true @@ -78,10 +78,10 @@ provided scripts, but feel free to have a look! ```sh # Install Linkerd and multicluster, output to check should be a success -./install.sh +$ ./install.sh # Next, link the two clusters together -./link.sh +$ ./link.sh ``` Perfect! If you've made it this far with no errors, then it's a good sign. In @@ -101,17 +101,17 @@ communication. First, we will deploy our pods and services: ```sh # deploy services and mesh namespaces -./deploy.sh +$ ./deploy.sh # verify both clusters # # verify east -kubectl --context=k3d-east get pods +$ kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 7s # verify west has headless service -kubectl --context=k3d-west get services +$ kubectl --context=k3d-west get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 10m nginx-svc ClusterIP None 80/TCP 8s @@ -119,7 +119,7 @@ nginx-svc ClusterIP None 80/TCP 8s # verify west has statefulset # # this may take a while to come up -kubectl --context=k3d-west get pods +$ kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 53s nginx-set-1 2/2 Running 0 43s @@ -130,7 +130,7 @@ Before we go further, let's have a look at the endpoints object for the `nginx-svc`: ```sh -kubectl --context=k3d-west get endpoints nginx-svc -o yaml +$ kubectl --context=k3d-west get endpoints nginx-svc -o yaml ... subsets: - addresses: @@ -170,15 +170,15 @@ would get an answer back. 
We can test this out by applying the curl pod to the `west` cluster:

```sh
-kubectl --context=k3d-west apply -f east/curl.yml
-kubectl --context=k3d-west get pods
+$ kubectl --context=k3d-west apply -f east/curl.yml
+$ kubectl --context=k3d-west get pods
NAME READY STATUS RESTARTS AGE
nginx-set-0 2/2 Running 0 5m8s
nginx-set-1 2/2 Running 0 4m58s
nginx-set-2 2/2 Running 0 4m51s
curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s

-kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh
+$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh
/# prompt for curl pod
```

@@ -186,7 +186,7 @@ If we now curl one of these instances, we will get back a response.

```sh
# exec'd on the pod
-/ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local
+$ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local

"
@@ -218,7 +218,7 @@ Now, let's do the same, but this time from the `east` cluster. We will first
export the service.

```sh
-kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true"
+$ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true"
service/nginx-svc labeled

kubectl --context=k3d-east get services

@@ -235,7 +235,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname
will point to one of the services we see above:

```sh
-kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml
+$ kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml
subsets:
- addresses:
  - hostname: nginx-set-0
@@ -251,17 +251,17 @@ cluster (`west`), will be mirrored as a clusterIP service. We will see in a
second why this matters.
```sh
-kubectl --context=k3d-east get pods
+$ kubectl --context=k3d-east get pods
NAME READY STATUS RESTARTS AGE
curl-56dc7d945d-96r6p 2/2 Running 0 23m

# exec and curl
-kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh
+$ kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh
# we want to curl the same hostname we see in the endpoints object above.
# however, the service and cluster domain will now be different, since we
# are in a different cluster.
#
-/ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local
+$ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local

@@ -329,8 +329,8 @@ validation. To clean-up, you can remove both clusters entirely using the k3d
CLI:

```sh
-k3d cluster delete east
+$ k3d cluster delete east
cluster east deleted
-k3d cluster delete west
+$ k3d cluster delete west
cluster west deleted
```

diff --git a/linkerd.io/content/2.14/tasks/restricting-access.md b/linkerd.io/content/2.14/tasks/restricting-access.md
index 38ebdaeb3d..af25ce411e 100644
--- a/linkerd.io/content/2.14/tasks/restricting-access.md
+++ b/linkerd.io/content/2.14/tasks/restricting-access.md
@@ -21,9 +21,9 @@ haven't already done this. Inject and install the Emojivoto application:

```bash
-linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f -
+$ linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f -
...
-linkerd check -n emojivoto --proxy -o short
+$ linkerd check -n emojivoto --proxy -o short
...
``` @@ -68,7 +68,7 @@ of requests coming to the voting service and see that all incoming requests to the voting-grpc server are currently unauthorized: ```bash -> linkerd viz authz -n emojivoto deploy/voting +$ linkerd viz authz -n emojivoto deploy/voting ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated default/all-unauthenticated 0.0rps 100.00% 0.1rps 1ms 1ms 1ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.2rps 1ms 1ms 1ms @@ -112,7 +112,7 @@ the `linkerd viz auth` command queries over a time-window, you may see some UNAUTHORIZED requests displayed for a short amount of time. ```bash -> linkerd viz authz -n emojivoto deploy/voting +$ linkerd viz authz -n emojivoto deploy/voting ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated default/all-unauthenticated 0.0rps 100.00% 0.1rps 1ms 1ms 1ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.2rps 1ms 1ms 1ms @@ -123,7 +123,7 @@ We can also test that request from other pods will be rejected by creating a `grpcurl` pod and attempting to access the Voting service from it: ```bash -> kubectl run grpcurl --rm -it --image=networld/grpcurl --restart=Never --command -- ./grpcurl -plaintext voting-svc.emojivoto:8080 emojivoto.v1.VotingService/VoteDog +$ kubectl run grpcurl --rm -it --image=networld/grpcurl --restart=Never --command -- ./grpcurl -plaintext voting-svc.emojivoto:8080 emojivoto.v1.VotingService/VoteDog Error invoking method "emojivoto.v1.VotingService/VoteDog": failed to query for service descriptor "emojivoto.v1.VotingService": rpc error: code = PermissionDenied desc = pod "grpcurl" deleted pod default/grpcurl terminated (Error) @@ -153,7 +153,7 @@ following logic when deciding whether to allow a request: We can set the default policy to `deny` using the `linkerd upgrade` command: ```bash -> linkerd upgrade --default-inbound-policy 
deny | kubectl apply -f - +linkerd upgrade --default-inbound-policy deny | kubectl apply -f - ``` Alternatively, default policies can be set on individual workloads or namespaces diff --git a/linkerd.io/content/2.14/tasks/securing-linkerd-tap.md b/linkerd.io/content/2.14/tasks/securing-linkerd-tap.md index 639f81692f..8a802c890c 100644 --- a/linkerd.io/content/2.14/tasks/securing-linkerd-tap.md +++ b/linkerd.io/content/2.14/tasks/securing-linkerd-tap.md @@ -60,7 +60,7 @@ kubectl auth can-i watch deployments.tap.linkerd.io -n emojivoto --as $(whoami) You can also use the Linkerd CLI's `--as` flag to confirm: ```bash -linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) Cannot connect to Linkerd Viz: namespaces is forbidden: User "XXXX" cannot list resource "namespaces" in API group "" at the cluster scope Validate the install with: linkerd viz check ... @@ -77,7 +77,7 @@ To enable tap access to all resources in all namespaces, you may bind your user to the `linkerd-linkerd-tap-admin` ClusterRole, installed by default: ```bash -kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin +$ kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin Name: linkerd-linkerd-viz-tap-admin Labels: component=tap linkerd.io/extension=viz @@ -109,7 +109,7 @@ kubectl create clusterrolebinding \ You can verify you now have tap access with: ```bash -linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) req id=3:0 proxy=in src=10.244.0.1:37392 dst=10.244.0.13:9996 tls=not_provided_by_remote :method=GET :authority=10.244.0.13:9996 :path=/ping ... ``` @@ -143,14 +143,14 @@ Because GCloud provides this additional level of access, there are cases where not. 
To validate this, check whether your GCloud user has Tap access: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces yes ``` And then validate whether your RBAC user has Tap access: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) no - no RBAC policy matched ``` @@ -187,14 +187,14 @@ privileges necessary to tap resources. To confirm: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web yes ``` This access is enabled via a `linkerd-linkerd-viz-web-admin` ClusterRoleBinding: ```bash -kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin +$ kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin Name: linkerd-linkerd-viz-web-admin Labels: component=web linkerd.io/extensions=viz @@ -227,6 +227,6 @@ kubectl delete clusterrolebindings/linkerd-linkerd-viz-web-admin To confirm: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web no ``` diff --git a/linkerd.io/content/2.14/tasks/troubleshooting.md b/linkerd.io/content/2.14/tasks/troubleshooting.md index 6dd989f3b3..804a2382ee 100644 --- a/linkerd.io/content/2.14/tasks/troubleshooting.md +++ b/linkerd.io/content/2.14/tasks/troubleshooting.md @@ -862,7 +862,7 @@ Ensure you can connect to the Linkerd version check endpoint from the environment the `linkerd` cli is running: ```bash -curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" +$ curl 
"https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" {"stable":"stable-2.1.0","edge":"edge-19.1.2"} ``` @@ -1950,7 +1950,7 @@ Ensure you can connect to the Linkerd Buoyant version check endpoint from the environment the `linkerd` cli is running: ```bash -curl https://buoyant.cloud/version.json +$ curl https://buoyant.cloud/version.json {"linkerd-buoyant":"v0.4.4"} ``` @@ -2194,7 +2194,7 @@ linkerd-buoyant install | kubectl apply -f - `buoyant-cloud-agent` should run as a singleton. Check for other pods: ```bash -kubectl get po -A --selector app=buoyant-cloud-agent +$ kubectl get po -A --selector app=buoyant-cloud-agent ``` ### √ buoyant-cloud-metrics DaemonSet exists @@ -2208,7 +2208,7 @@ kubectl get po -A --selector app=buoyant-cloud-agent Ensure the `buoyant-cloud-metrics` DaemonSet exists: ```bash -kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics +$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics ``` If the DaemonSet does not exist, the `linkerd-buoyant` installation may be diff --git a/linkerd.io/content/2.15/tasks/configuring-dynamic-request-routing.md b/linkerd.io/content/2.15/tasks/configuring-dynamic-request-routing.md index e554bc6ac4..c38f2a438c 100644 --- a/linkerd.io/content/2.15/tasks/configuring-dynamic-request-routing.md +++ b/linkerd.io/content/2.15/tasks/configuring-dynamic-request-routing.md @@ -67,7 +67,7 @@ Requests to `/echo` on port 9898 to the frontend pod will get forwarded the pod pointed by the Service `backend-a-podinfo`: ```bash -curl -sX POST localhost:9898/echo \ +$ curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' PODINFO_UI_MESSAGE=A backend @@ -161,7 +161,7 @@ to the `backend-a-podinfo` Service. The previous requests should still reach `backend-a-podinfo` only: ```bash -curl -sX POST localhost:9898/echo \ +$ curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. 
backend' PODINFO_UI_MESSAGE=A backend @@ -171,7 +171,7 @@ But if we add the "`x-request-id: alternative`" header they get routed to `backend-b-podinfo`: ```bash -curl -sX POST \ +$ curl -sX POST \ -H 'x-request-id: alternative' \ localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' diff --git a/linkerd.io/content/2.15/tasks/configuring-per-route-policy.md b/linkerd.io/content/2.15/tasks/configuring-per-route-policy.md index 63b79fc6d4..ea27641a9c 100644 --- a/linkerd.io/content/2.15/tasks/configuring-per-route-policy.md +++ b/linkerd.io/content/2.15/tasks/configuring-per-route-policy.md @@ -87,7 +87,7 @@ First, let's run the `linkerd viz authz` command to list the authorization resources that currently exist for the `authors` deployment: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated default/all-unauthenticated 0.0rps 70.31% 8.1rps 1ms 43ms 49ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.3rps 1ms 1ms 1ms @@ -124,7 +124,7 @@ Now that we've defined a [`Server`] for the authors `Deployment`, we can run the currently unauthorized: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default authors-server 9.5rps 0.00% 0.0rps 0ms 0ms 0ms probe authors-server default/probe 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -312,7 +312,7 @@ network (0.0.0.0). 
Running `linkerd viz authz` again, we can now see that our new policies exist: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 2ms 2ms 2ms authors-probe-route authors-server authorizationpolicy/authors-probe-policy 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -383,7 +383,7 @@ requests, but we haven't _authorized_ requests to that route. Running the requests to `authors-modify-route`: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy - - - - - - authors-modify-route authors-server 9.7rps 0.00% 0.0rps 0ms 0ms 0ms @@ -442,7 +442,7 @@ Running the `linkerd viz authz` command one last time, we now see that all traffic is authorized: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 0ms 0ms 0ms authors-modify-route authors-server authorizationpolicy/authors-modify-policy 0.0rps 100.00% 0.0rps 0ms 0ms 0ms diff --git a/linkerd.io/content/2.15/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.15/tasks/multicluster-using-statefulsets.md index 0bdc7f7d24..cd6104f42f 100644 --- a/linkerd.io/content/2.15/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.15/tasks/multicluster-using-statefulsets.md @@ -48,8 +48,8 @@ The first step is to clone the demo repository on your local machine. 
```sh
# clone example repository
-$ git clone git@github.com:mateiidavid/l2d-k3d-statefulset.git
-$ cd l2d-k3d-statefulset
+git clone git@github.com:mateiidavid/l2d-k3d-statefulset.git
+cd l2d-k3d-statefulset
```

The second step consists of creating two `k3d` clusters named `east` and `west`,
@@ -178,7 +178,7 @@ nginx-set-1 2/2 Running 0 4m58s
nginx-set-2 2/2 Running 0 4m51s
curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s

-$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh
+$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh
/# prompt for curl pod
```

@@ -186,7 +186,7 @@ If we now curl one of these instances, we will get back a response.

```sh
# exec'd on the pod
-/ $ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local
+$ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local

"
@@ -235,7 +235,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname
will point to one of the services we see above:

```sh
-kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml
+$ kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml
subsets:
- addresses:
  - hostname: nginx-set-0
@@ -256,12 +256,12 @@ NAME READY STATUS RESTARTS AGE
curl-56dc7d945d-96r6p 2/2 Running 0 23m

# exec and curl
-$ kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh
+$ kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh
# we want to curl the same hostname we see in the endpoints object above.
# however, the service and cluster domain will now be different, since we
# are in a different cluster.
# -/ $ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local +$ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local diff --git a/linkerd.io/content/2.15/tasks/multicluster.md b/linkerd.io/content/2.15/tasks/multicluster.md index ece25f2732..70c6a8c454 100644 --- a/linkerd.io/content/2.15/tasks/multicluster.md +++ b/linkerd.io/content/2.15/tasks/multicluster.md @@ -101,13 +101,13 @@ With a valid trust anchor and issuer credentials, we can install Linkerd on your ```bash # first, install the Linkerd CRDs in both clusters -$ linkerd install --crds \ +linkerd install --crds \ | tee \ >(kubectl --context=west apply -f -) \ >(kubectl --context=east apply -f -) # then install the Linkerd control plane in both clusters -$ linkerd install \ +linkerd install \ --identity-trust-anchors-file root.crt \ --identity-issuer-certificate-file issuer.crt \ --identity-issuer-key-file issuer.key \ diff --git a/linkerd.io/content/2.15/tasks/restricting-access.md b/linkerd.io/content/2.15/tasks/restricting-access.md index 38ebdaeb3d..08e06534f1 100644 --- a/linkerd.io/content/2.15/tasks/restricting-access.md +++ b/linkerd.io/content/2.15/tasks/restricting-access.md @@ -21,9 +21,9 @@ haven't already done this. Inject and install the Emojivoto application: ```bash -linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - +$ linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - ... -linkerd check -n emojivoto --proxy -o short +$ linkerd check -n emojivoto --proxy -o short ... 
``` @@ -68,7 +68,7 @@ of requests coming to the voting service and see that all incoming requests to the voting-grpc server are currently unauthorized: ```bash -> linkerd viz authz -n emojivoto deploy/voting +$ linkerd viz authz -n emojivoto deploy/voting ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated default/all-unauthenticated 0.0rps 100.00% 0.1rps 1ms 1ms 1ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.2rps 1ms 1ms 1ms @@ -84,7 +84,7 @@ to the Voting `Server` we created above. Note that meshed mTLS uses based on `ServiceAccounts`. ```bash -kubectl apply -f - < linkerd viz authz -n emojivoto deploy/voting +$ linkerd viz authz -n emojivoto deploy/voting ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated default/all-unauthenticated 0.0rps 100.00% 0.1rps 1ms 1ms 1ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.2rps 1ms 1ms 1ms @@ -123,7 +123,7 @@ We can also test that request from other pods will be rejected by creating a `grpcurl` pod and attempting to access the Voting service from it: ```bash -> kubectl run grpcurl --rm -it --image=networld/grpcurl --restart=Never --command -- ./grpcurl -plaintext voting-svc.emojivoto:8080 emojivoto.v1.VotingService/VoteDog +$ kubectl run grpcurl --rm -it --image=networld/grpcurl --restart=Never --command -- ./grpcurl -plaintext voting-svc.emojivoto:8080 emojivoto.v1.VotingService/VoteDog Error invoking method "emojivoto.v1.VotingService/VoteDog": failed to query for service descriptor "emojivoto.v1.VotingService": rpc error: code = PermissionDenied desc = pod "grpcurl" deleted pod default/grpcurl terminated (Error) @@ -153,7 +153,7 @@ following logic when deciding whether to allow a request: We can set the default policy to `deny` using the `linkerd upgrade` command: ```bash -> linkerd upgrade --default-inbound-policy deny | kubectl apply -f 
-
+linkerd upgrade --default-inbound-policy deny | kubectl apply -f -
```

Alternatively, default policies can be set on individual workloads or namespaces
@@ -167,7 +167,7 @@ explicitly create an authorization to allow those probe requests. For more
information about adding route-scoped authorizations, see [Configuring
Per-Route Policy](configuring-per-route-policy/).

-## Further Considerations
+## Further Considerations

You may have noticed that there was a period of time after we created the
`Server` resource but before we created the `ServerAuthorization` where all
diff --git a/linkerd.io/content/2.15/tasks/securing-linkerd-tap.md b/linkerd.io/content/2.15/tasks/securing-linkerd-tap.md
index 639f81692f..8a802c890c 100644
--- a/linkerd.io/content/2.15/tasks/securing-linkerd-tap.md
+++ b/linkerd.io/content/2.15/tasks/securing-linkerd-tap.md
@@ -60,7 +60,7 @@ kubectl auth can-i watch deployments.tap.linkerd.io -n emojivoto --as $(whoami)

You can also use the Linkerd CLI's `--as` flag to confirm:

```bash
-linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami)
+$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami)
Cannot connect to Linkerd Viz: namespaces is forbidden: User "XXXX" cannot list resource "namespaces" in API group "" at the cluster scope
Validate the install with: linkerd viz check
...
@@ -77,7 +77,7 @@ To enable tap access to all resources in all namespaces, you may bind your user to the `linkerd-linkerd-tap-admin` ClusterRole, installed by default: ```bash -kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin +$ kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin Name: linkerd-linkerd-viz-tap-admin Labels: component=tap linkerd.io/extension=viz @@ -109,7 +109,7 @@ kubectl create clusterrolebinding \ You can verify you now have tap access with: ```bash -linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) req id=3:0 proxy=in src=10.244.0.1:37392 dst=10.244.0.13:9996 tls=not_provided_by_remote :method=GET :authority=10.244.0.13:9996 :path=/ping ... ``` @@ -143,14 +143,14 @@ Because GCloud provides this additional level of access, there are cases where not. To validate this, check whether your GCloud user has Tap access: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces yes ``` And then validate whether your RBAC user has Tap access: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) no - no RBAC policy matched ``` @@ -187,14 +187,14 @@ privileges necessary to tap resources. 
To confirm: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web yes ``` This access is enabled via a `linkerd-linkerd-viz-web-admin` ClusterRoleBinding: ```bash -kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin +$ kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin Name: linkerd-linkerd-viz-web-admin Labels: component=web linkerd.io/extensions=viz @@ -227,6 +227,6 @@ kubectl delete clusterrolebindings/linkerd-linkerd-viz-web-admin To confirm: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web no ``` diff --git a/linkerd.io/content/2.15/tasks/troubleshooting.md b/linkerd.io/content/2.15/tasks/troubleshooting.md index c5dd2f3e0a..194a7b5b8b 100644 --- a/linkerd.io/content/2.15/tasks/troubleshooting.md +++ b/linkerd.io/content/2.15/tasks/troubleshooting.md @@ -862,7 +862,7 @@ Ensure you can connect to the Linkerd version check endpoint from the environment the `linkerd` cli is running: ```bash -curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" +$ curl "https://versioncheck.linkerd.io/version.json?version=edge-19.1.2&uuid=test-uuid&source=cli" {"stable":"stable-2.1.0","edge":"edge-19.1.2"} ``` @@ -1965,7 +1965,7 @@ Ensure you can connect to the Linkerd Buoyant version check endpoint from the environment the `linkerd` cli is running: ```bash -curl https://buoyant.cloud/version.json +$ curl https://buoyant.cloud/version.json {"linkerd-buoyant":"v0.4.4"} ``` @@ -2272,7 +2272,7 @@ Ensure the `buoyant-cloud-metrics` pods are injected, the `READY` column should show `2/2`: ```bash -kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics +$ 
kubectl -n buoyant-cloud get pods --selector app=buoyant-cloud-metrics NAME READY STATUS RESTARTS AGE buoyant-cloud-metrics-kt9mv 2/2 Running 0 166m buoyant-cloud-metrics-q8jhj 2/2 Running 0 166m diff --git a/linkerd.io/content/2.16/tasks/configuring-dynamic-request-routing.md b/linkerd.io/content/2.16/tasks/configuring-dynamic-request-routing.md index a44d12a1a5..004b50ded6 100644 --- a/linkerd.io/content/2.16/tasks/configuring-dynamic-request-routing.md +++ b/linkerd.io/content/2.16/tasks/configuring-dynamic-request-routing.md @@ -67,7 +67,7 @@ Requests to `/echo` on port 9898 to the frontend pod will get forwarded the pod pointed by the Service `backend-a-podinfo`: ```bash -curl -sX POST localhost:9898/echo \ +$ curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' PODINFO_UI_MESSAGE=A backend @@ -132,7 +132,7 @@ the `backend-a-podinfo` Service. The previous requests should still reach `backend-a-podinfo` only: ```bash -curl -sX POST localhost:9898/echo \ +$ curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' PODINFO_UI_MESSAGE=A backend @@ -142,7 +142,7 @@ But if we add the `x-request-id: alternative` header, they get routed to `backend-b-podinfo`: ```bash -curl -sX POST \ +$ curl -sX POST \ -H 'x-request-id: alternative' \ localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. 
backend' diff --git a/linkerd.io/content/2.16/tasks/configuring-per-route-policy.md b/linkerd.io/content/2.16/tasks/configuring-per-route-policy.md index 63b79fc6d4..ea27641a9c 100644 --- a/linkerd.io/content/2.16/tasks/configuring-per-route-policy.md +++ b/linkerd.io/content/2.16/tasks/configuring-per-route-policy.md @@ -87,7 +87,7 @@ First, let's run the `linkerd viz authz` command to list the authorization resources that currently exist for the `authors` deployment: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default default:all-unauthenticated default/all-unauthenticated 0.0rps 70.31% 8.1rps 1ms 43ms 49ms probe default:all-unauthenticated default/probe 0.0rps 100.00% 0.3rps 1ms 1ms 1ms @@ -124,7 +124,7 @@ Now that we've defined a [`Server`] for the authors `Deployment`, we can run the currently unauthorized: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 default authors-server 9.5rps 0.00% 0.0rps 0ms 0ms 0ms probe authors-server default/probe 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -312,7 +312,7 @@ network (0.0.0.0). Running `linkerd viz authz` again, we can now see that our new policies exist: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 2ms 2ms 2ms authors-probe-route authors-server authorizationpolicy/authors-probe-policy 0.0rps 100.00% 0.1rps 1ms 1ms 1ms @@ -383,7 +383,7 @@ requests, but we haven't _authorized_ requests to that route. 
Running the requests to `authors-modify-route`: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy - - - - - - authors-modify-route authors-server 9.7rps 0.00% 0.0rps 0ms 0ms 0ms @@ -442,7 +442,7 @@ Running the `linkerd viz authz` command one last time, we now see that all traffic is authorized: ```bash -linkerd viz authz -n booksapp deploy/authors +$ linkerd viz authz -n booksapp deploy/authors ROUTE SERVER AUTHORIZATION UNAUTHORIZED SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 authors-get-route authors-server authorizationpolicy/authors-get-policy 0.0rps 100.00% 0.1rps 0ms 0ms 0ms authors-modify-route authors-server authorizationpolicy/authors-modify-policy 0.0rps 100.00% 0.0rps 0ms 0ms 0ms diff --git a/linkerd.io/content/2.16/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.16/tasks/multicluster-using-statefulsets.md index c8d4400521..729e6c13c0 100644 --- a/linkerd.io/content/2.16/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.16/tasks/multicluster-using-statefulsets.md @@ -60,10 +60,10 @@ everything. ```sh # create k3d clusters -./create.sh +$ ./create.sh # list the clusters -k3d cluster list +$ k3d cluster list NAME SERVERS AGENTS LOADBALANCER east 1/1 0/0 true west 1/1 0/0 true @@ -78,10 +78,10 @@ provided scripts, but feel free to have a look! ```sh # Install Linkerd and multicluster, output to check should be a success -./install.sh +$ ./install.sh # Next, link the two clusters together -./link.sh +$ ./link.sh ``` Perfect! If you've made it this far with no errors, then it's a good sign. In @@ -101,17 +101,17 @@ communication. 
First, we will deploy our pods and services: ```sh # deploy services and mesh namespaces -./deploy.sh +$ ./deploy.sh # verify both clusters # # verify east -kubectl --context=k3d-east get pods +$ kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 7s # verify west has headless service -kubectl --context=k3d-west get services +$ kubectl --context=k3d-west get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 10m nginx-svc ClusterIP None 80/TCP 8s @@ -119,7 +119,7 @@ nginx-svc ClusterIP None 80/TCP 8s # verify west has statefulset # # this may take a while to come up -kubectl --context=k3d-west get pods +$ kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 53s nginx-set-1 2/2 Running 0 43s @@ -130,7 +130,7 @@ Before we go further, let's have a look at the endpoints object for the `nginx-svc`: ```sh -kubectl --context=k3d-west get endpoints nginx-svc -o yaml +$ kubectl --context=k3d-west get endpoints nginx-svc -o yaml ... subsets: - addresses: @@ -170,15 +170,15 @@ would get an answer back. We can test this out by applying the curl pod to the `west` cluster: ```sh -kubectl --context=k3d-west apply -f east/curl.yml -kubectl --context=k3d-west get pods +$ kubectl --context=k3d-west apply -f east/curl.yml +$ kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 5m8s nginx-set-1 2/2 Running 0 4m58s nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s -kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh +$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh /# prompt for curl pod ``` @@ -186,7 +186,7 @@ If we now curl one of these instances, we will get back a response. 
```sh # exec'd on the pod -/ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local +$ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local " @@ -218,7 +218,7 @@ Now, let's do the same, but this time from the `east` cluster. We will first export the service. ```sh -kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" +$ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled kubectl --context=k3d-east get services @@ -235,7 +235,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname will point to one of the services we see above: ```sh -kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml +$ kubectl --context=k3d-east get endpoints nginx-svc-k3d-west -o yaml subsets: - addresses: - hostname: nginx-set-0 @@ -251,17 +251,17 @@ cluster (`west`), will be mirrored as a clusterIP service. We will see in a second why this matters. ```sh -kubectl --context=k3d-east get pods +$ kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh +kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. # -/ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local +$ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local @@ -329,8 +329,8 @@ validation. 
To clean-up, you can remove both clusters entirely using the k3d CLI: ```sh -k3d cluster delete east +$ k3d cluster delete east cluster east deleted -k3d cluster delete west +$ k3d cluster delete west cluster west deleted ``` diff --git a/linkerd.io/content/2.16/tasks/restricting-access.md b/linkerd.io/content/2.16/tasks/restricting-access.md index d949af35f6..fdcbf4a1f6 100644 --- a/linkerd.io/content/2.16/tasks/restricting-access.md +++ b/linkerd.io/content/2.16/tasks/restricting-access.md @@ -21,9 +21,9 @@ haven't already done this. Inject and install the Emojivoto application: ```bash -linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - +$ linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - ... -linkerd check -n emojivoto --proxy -o short +$ linkerd check -n emojivoto --proxy -o short ... ``` @@ -84,7 +84,7 @@ to the Voting `Server` we created above. Note that meshed mTLS uses based on `ServiceAccounts`. ```bash -$ kubectl apply -f - < 443/TCP 10m nginx-svc ClusterIP None 80/TCP 8s @@ -119,7 +119,7 @@ nginx-svc ClusterIP None 80/TCP 8s # verify west has statefulset # # this may take a while to come up -kubectl --context=k3d-west get pods +$ kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 53s nginx-set-1 2/2 Running 0 43s @@ -130,7 +130,7 @@ Before we go further, let's have a look at the endpoints object for the `nginx-svc`: ```sh -kubectl --context=k3d-west get endpoints nginx-svc -o yaml +$ kubectl --context=k3d-west get endpoints nginx-svc -o yaml ... subsets: - addresses: @@ -170,15 +170,15 @@ would get an answer back. 
We can test this out by applying the curl pod to the `west` cluster: ```sh -kubectl --context=k3d-west apply -f east/curl.yml -kubectl --context=k3d-west get pods +$ kubectl --context=k3d-west apply -f east/curl.yml +$ kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 5m8s nginx-set-1 2/2 Running 0 4m58s nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s -kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh +$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh /# prompt for curl pod ``` @@ -186,7 +186,7 @@ If we now curl one of these instances, we will get back a response. ```sh # exec'd on the pod -/ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local +$ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local " @@ -218,7 +218,7 @@ Now, let's do the same, but this time from the `east` cluster. We will first export the service. ```sh -kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" +$ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled kubectl --context=k3d-east get services @@ -235,7 +235,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname will point to one of the services we see above: ```sh -kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml +$ kubectl --context=k3d-east get endpoints nginx-svc-k3d-west -o yaml subsets: - addresses: - hostname: nginx-set-0 @@ -251,17 +251,17 @@ cluster (`west`), will be mirrored as a clusterIP service. We will see in a second why this matters. 
```sh -kubectl --context=k3d-east get pods +$ kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh +$ kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. # -/ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local +$ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local @@ -329,8 +329,8 @@ validation. To clean-up, you can remove both clusters entirely using the k3d CLI: ```sh -k3d cluster delete east +$ k3d cluster delete east cluster east deleted -k3d cluster delete west +$ k3d cluster delete west cluster west deleted ``` diff --git a/linkerd.io/content/2.17/tasks/restricting-access.md b/linkerd.io/content/2.17/tasks/restricting-access.md index d949af35f6..a5787cf354 100644 --- a/linkerd.io/content/2.17/tasks/restricting-access.md +++ b/linkerd.io/content/2.17/tasks/restricting-access.md @@ -21,9 +21,9 @@ haven't already done this. Inject and install the Emojivoto application: ```bash -linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - +$ linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - ... -linkerd check -n emojivoto --proxy -o short +$ linkerd check -n emojivoto --proxy -o short ... ``` @@ -35,7 +35,7 @@ Linkerd custom resource which describes a specific port of a workload. Once the access it (we'll see how to authorize clients in a moment). 
```bash
-$ kubectl apply -f - <}}

@@ -235,7 +235,7 @@ Interestingly enough though, if we go back to our client
shell and we try to
initiate HTTPS traffic to the same service, it will not be allowed:

```bash
-curl -v https://httpbin.org/get
+$ curl -v https://httpbin.org/get
curl: (35) TLS connect error: error:00000000:lib(0)::reason(0)
```

@@ -271,7 +271,7 @@ This fixes the problem and we can see HTTPS requests to
the external service
succeeding reflected in the metrics:

```bash
-linkerd dg proxy-metrics -n egress-test po/client | grep outbound_tls_route_open_total
+$ linkerd dg proxy-metrics -n egress-test po/client | grep outbound_tls_route_open_total

outbound_tls_route_open_total{
parent_group="policy.linkerd.io",
@@ -296,7 +296,7 @@ our client, we will see the proxy eagerly closing the connection because it is
not forbidden by our current policy configuration:

```bash
-linkerd dg proxy-metrics -n egress-test po/client | grep outbound_tls_route_close_total
+$ linkerd dg proxy-metrics -n egress-test po/client | grep outbound_tls_route_close_total

outbound_tls_route_close_total{
parent_group="policy.linkerd.io",
@@ -458,7 +458,7 @@ Now let's verify all works as expected:

```bash
# plaintext traffic goes as expected to the /get path
-curl http://httpbin.org/get
+$ curl http://httpbin.org/get
{
"args": {},
"headers": {
@@ -472,14 +472,14 @@ curl http://httpbin.org/get
}

# encrypted traffic can target all paths and hosts
-curl https://httpbin.org/ip
+$ curl https://httpbin.org/ip
{
"origin": "51.116.126.217"
}

# arbitrary unencrypted traffic goes to the internal service
-curl http://google.com
+$ curl http://google.com
{
"requestUID": "in:http-sid:terminus-grpc:-1-h1:80-190120723", "payload": "You cannot go there right now"}
diff --git a/linkerd.io/content/2.18/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.18/tasks/multicluster-using-statefulsets.md
index 83c638a4ae..f193db3a51 100644
---
a/linkerd.io/content/2.18/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.18/tasks/multicluster-using-statefulsets.md @@ -60,10 +60,10 @@ everything. ```sh # create k3d clusters -./create.sh +$ ./create.sh # list the clusters -k3d cluster list +$ k3d cluster list NAME SERVERS AGENTS LOADBALANCER east 1/1 0/0 true west 1/1 0/0 true @@ -77,10 +77,10 @@ controllers and links are generated for both clusters. ```sh # Install Linkerd and multicluster, output to check should be a success -./install.sh +$ ./install.sh # Next, link the two clusters together -./link.sh +$ ./link.sh ``` Perfect! If you've made it this far with no errors, then it's a good sign. In @@ -100,17 +100,17 @@ communication. First, we will deploy our pods and services: ```sh # deploy services and mesh namespaces -./deploy.sh +$ ./deploy.sh # verify both clusters # # verify east -kubectl --context=k3d-east get pods +$ kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 7s # verify west has headless service -kubectl --context=k3d-west get services +$ kubectl --context=k3d-west get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 10m nginx-svc ClusterIP None 80/TCP 8s @@ -118,7 +118,7 @@ nginx-svc ClusterIP None 80/TCP 8s # verify west has statefulset # # this may take a while to come up -kubectl --context=k3d-west get pods +$ kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 53s nginx-set-1 2/2 Running 0 43s @@ -129,7 +129,7 @@ Before we go further, let's have a look at the endpoints object for the `nginx-svc`: ```sh -kubectl --context=k3d-west get endpoints nginx-svc -o yaml +$ kubectl --context=k3d-west get endpoints nginx-svc -o yaml ... subsets: - addresses: @@ -169,15 +169,15 @@ would get an answer back. 
We can test this out by applying the curl pod to the `west` cluster: ```sh -kubectl --context=k3d-west apply -f east/curl.yml -kubectl --context=k3d-west get pods +$ kubectl --context=k3d-west apply -f east/curl.yml +$ kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 5m8s nginx-set-1 2/2 Running 0 4m58s nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s -kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh +$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh /# prompt for curl pod ``` @@ -185,7 +185,7 @@ If we now curl one of these instances, we will get back a response. ```sh # exec'd on the pod -/ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local +$ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local " @@ -217,7 +217,7 @@ Now, let's do the same, but this time from the `east` cluster. We will first export the service. ```sh -kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" +$ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled kubectl --context=k3d-east get services @@ -234,7 +234,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname will point to one of the services we see above: ```sh -kubectl --context=k3d-east get endpoints nginx-svc-k3d-west -o yaml +$ kubectl --context=k3d-east get endpoints nginx-svc-k3d-west -o yaml subsets: - addresses: - hostname: nginx-set-0 @@ -250,7 +250,7 @@ cluster (`west`), will be mirrored as a clusterIP service. We will see in a second why this matters. 
```sh -kubectl --context=k3d-east get pods +$ kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m @@ -260,7 +260,7 @@ kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh # however, the service and cluster domain will now be different, since we # are in a different cluster. # -/ curl nginx-set-0.nginx-svc-k3d-west.default.svc.east.cluster.local +$ curl nginx-set-0.nginx-svc-k3d-west.default.svc.east.cluster.local @@ -328,8 +328,8 @@ validation. To clean-up, you can remove both clusters entirely using the k3d CLI: ```sh -k3d cluster delete east +$ k3d cluster delete east cluster east deleted -k3d cluster delete west +$ k3d cluster delete west cluster west deleted ``` diff --git a/linkerd.io/content/2.18/tasks/multicluster.md b/linkerd.io/content/2.18/tasks/multicluster.md index 2779b7616a..5aeb332398 100644 --- a/linkerd.io/content/2.18/tasks/multicluster.md +++ b/linkerd.io/content/2.18/tasks/multicluster.md @@ -348,7 +348,7 @@ the `linkerd multicluster link-gen` command or by editing the Link resource. {{< /note >}} -Check out the service that was just created by the controller! +Check out the service that was just created by the service mirror controller! ```bash kubectl --context=west -n test get svc podinfo-east diff --git a/linkerd.io/content/2.18/tasks/restricting-access.md b/linkerd.io/content/2.18/tasks/restricting-access.md index 20dba47a35..a5787cf354 100644 --- a/linkerd.io/content/2.18/tasks/restricting-access.md +++ b/linkerd.io/content/2.18/tasks/restricting-access.md @@ -21,9 +21,9 @@ haven't already done this. Inject and install the Emojivoto application: ```bash -linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - +$ linkerd inject https://run.linkerd.io/emojivoto.yml | kubectl apply -f - ... -linkerd check -n emojivoto --proxy -o short +$ linkerd check -n emojivoto --proxy -o short ... 
```
@@ -35,7 +35,7 @@ Linkerd custom resource which describes a specific port of a workload. Once the
access it (we'll see how to authorize clients in a moment).

```bash
-$ kubectl apply -f - <}}

@@ -235,7 +235,7 @@ Interestingly enough though, if we go back to our client
shell and we try to
initiate HTTPS traffic to the same service, it will not be allowed:

```bash
-curl -v https://httpbin.org/get
+$ curl -v https://httpbin.org/get
curl: (35) TLS connect error: error:00000000:lib(0)::reason(0)
```

@@ -271,7 +271,7 @@ This fixes the problem and we can see HTTPS requests to
the external service
succeeding reflected in the metrics:

```bash
-linkerd dg proxy-metrics -n egress-test po/client | grep outbound_tls_route_open_total
+$ linkerd dg proxy-metrics -n egress-test po/client | grep outbound_tls_route_open_total

outbound_tls_route_open_total{
parent_group="policy.linkerd.io",
@@ -296,7 +296,7 @@ our client, we will see the proxy eagerly closing the connection because it is
not forbidden by our current policy configuration:

```bash
-linkerd dg proxy-metrics -n egress-test po/client | grep outbound_tls_route_close_total
+$ linkerd dg proxy-metrics -n egress-test po/client | grep outbound_tls_route_close_total

outbound_tls_route_close_total{
parent_group="policy.linkerd.io",
@@ -458,7 +458,7 @@ Now let's verify all works as expected:

```bash
# plaintext traffic goes as expected to the /get path
-curl http://httpbin.org/get
+$ curl http://httpbin.org/get
{
"args": {},
"headers": {
@@ -472,14 +472,14 @@ curl http://httpbin.org/get
}

# encrypted traffic can target all paths and hosts
-curl https://httpbin.org/ip
+$ curl https://httpbin.org/ip
{
"origin": "51.116.126.217"
}

# arbitrary unencrypted traffic goes to the internal service
-curl http://google.com
+$ curl http://google.com
{
"requestUID": "in:http-sid:terminus-grpc:-1-h1:80-190120723", "payload": "You cannot go there right now"}
diff --git a/linkerd.io/content/2.19/tasks/multicluster-using-statefulsets.md
b/linkerd.io/content/2.19/tasks/multicluster-using-statefulsets.md index 83c638a4ae..ef1f3dff24 100644 --- a/linkerd.io/content/2.19/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.19/tasks/multicluster-using-statefulsets.md @@ -60,10 +60,10 @@ everything. ```sh # create k3d clusters -./create.sh +$ ./create.sh # list the clusters -k3d cluster list +$ k3d cluster list NAME SERVERS AGENTS LOADBALANCER east 1/1 0/0 true west 1/1 0/0 true @@ -77,10 +77,10 @@ controllers and links are generated for both clusters. ```sh # Install Linkerd and multicluster, output to check should be a success -./install.sh +$ ./install.sh # Next, link the two clusters together -./link.sh +$ ./link.sh ``` Perfect! If you've made it this far with no errors, then it's a good sign. In @@ -100,17 +100,17 @@ communication. First, we will deploy our pods and services: ```sh # deploy services and mesh namespaces -./deploy.sh +$ ./deploy.sh # verify both clusters # # verify east -kubectl --context=k3d-east get pods +$ kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 7s # verify west has headless service -kubectl --context=k3d-west get services +$ kubectl --context=k3d-west get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 10m nginx-svc ClusterIP None 80/TCP 8s @@ -118,7 +118,7 @@ nginx-svc ClusterIP None 80/TCP 8s # verify west has statefulset # # this may take a while to come up -kubectl --context=k3d-west get pods +$ kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 53s nginx-set-1 2/2 Running 0 43s @@ -129,7 +129,7 @@ Before we go further, let's have a look at the endpoints object for the `nginx-svc`: ```sh -kubectl --context=k3d-west get endpoints nginx-svc -o yaml +$ kubectl --context=k3d-west get endpoints nginx-svc -o yaml ... subsets: - addresses: @@ -169,15 +169,15 @@ would get an answer back. 
We can test this out by applying the curl pod to the `west` cluster: ```sh -kubectl --context=k3d-west apply -f east/curl.yml -kubectl --context=k3d-west get pods +$ kubectl --context=k3d-west apply -f east/curl.yml +$ kubectl --context=k3d-west get pods NAME READY STATUS RESTARTS AGE nginx-set-0 2/2 Running 0 5m8s nginx-set-1 2/2 Running 0 4m58s nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s -kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh +$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh /# prompt for curl pod ``` @@ -185,7 +185,7 @@ If we now curl one of these instances, we will get back a response. ```sh # exec'd on the pod -/ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local +$ curl nginx-set-0.nginx-svc.default.svc.west.cluster.local " @@ -217,10 +217,10 @@ Now, let's do the same, but this time from the `east` cluster. We will first export the service. ```sh -kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" +$ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled -kubectl --context=k3d-east get services +$ kubectl --context=k3d-east get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 20h nginx-svc-west ClusterIP None 80/TCP 29s @@ -234,7 +234,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname will point to one of the services we see above: ```sh -kubectl --context=k3d-east get endpoints nginx-svc-k3d-west -o yaml +$ kubectl --context=k3d-east get endpoints nginx-svc-k3d-west -o yaml subsets: - addresses: - hostname: nginx-set-0 @@ -250,17 +250,17 @@ cluster (`west`), will be mirrored as a clusterIP service. We will see in a second why this matters. 
```sh -kubectl --context=k3d-east get pods +$ kubectl --context=k3d-east get pods NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh +$ kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. # -/ curl nginx-set-0.nginx-svc-k3d-west.default.svc.east.cluster.local +$ curl nginx-set-0.nginx-svc-k3d-west.default.svc.east.cluster.local @@ -328,8 +328,8 @@ validation. To clean-up, you can remove both clusters entirely using the k3d CLI: ```sh -k3d cluster delete east +$ k3d cluster delete east cluster east deleted -k3d cluster delete west +$ k3d cluster delete west cluster west deleted ``` diff --git a/linkerd.io/content/2.19/tasks/securing-linkerd-tap.md b/linkerd.io/content/2.19/tasks/securing-linkerd-tap.md index 639f81692f..8a802c890c 100644 --- a/linkerd.io/content/2.19/tasks/securing-linkerd-tap.md +++ b/linkerd.io/content/2.19/tasks/securing-linkerd-tap.md @@ -60,7 +60,7 @@ kubectl auth can-i watch deployments.tap.linkerd.io -n emojivoto --as $(whoami) You can also use the Linkerd CLI's `--as` flag to confirm: ```bash -linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) Cannot connect to Linkerd Viz: namespaces is forbidden: User "XXXX" cannot list resource "namespaces" in API group "" at the cluster scope Validate the install with: linkerd viz check ... 
@@ -77,7 +77,7 @@ To enable tap access to all resources in all namespaces, you may bind your user to the `linkerd-linkerd-tap-admin` ClusterRole, installed by default: ```bash -kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin +$ kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin Name: linkerd-linkerd-viz-tap-admin Labels: component=tap linkerd.io/extension=viz @@ -109,7 +109,7 @@ kubectl create clusterrolebinding \ You can verify you now have tap access with: ```bash -linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) req id=3:0 proxy=in src=10.244.0.1:37392 dst=10.244.0.13:9996 tls=not_provided_by_remote :method=GET :authority=10.244.0.13:9996 :path=/ping ... ``` @@ -143,14 +143,14 @@ Because GCloud provides this additional level of access, there are cases where not. To validate this, check whether your GCloud user has Tap access: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces yes ``` And then validate whether your RBAC user has Tap access: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) no - no RBAC policy matched ``` @@ -187,14 +187,14 @@ privileges necessary to tap resources. 
To confirm: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web yes ``` This access is enabled via a `linkerd-linkerd-viz-web-admin` ClusterRoleBinding: ```bash -kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin +$ kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin Name: linkerd-linkerd-viz-web-admin Labels: component=web linkerd.io/extensions=viz @@ -227,6 +227,6 @@ kubectl delete clusterrolebindings/linkerd-linkerd-viz-web-admin To confirm: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web no ``` From d8314a2a817e5197a0a6ed3527bfdd3d2a44b725 Mon Sep 17 00:00:00 2001 From: bezarsnba Date: Sat, 31 Jan 2026 17:41:22 -0300 Subject: [PATCH 15/31] revision configuration dynamic troubleshoot and others Signed-off-by: bezarsnba --- .../configuring-dynamic-request-routing.md | 6 ++-- .../content/2-edge/tasks/troubleshooting.md | 6 ++-- .../content/2.10/reference/cli/check.md | 2 +- .../2.10/tasks/getting-per-route-metrics.md | 8 +++--- .../2.10/tasks/securing-your-cluster.md | 2 +- .../content/2.11/reference/cli/check.md | 2 +- .../2.11/tasks/getting-per-route-metrics.md | 2 +- .../tasks/multicluster-using-statefulsets.md | 2 +- .../2.11/tasks/securing-your-cluster.md | 16 +++++------ .../content/2.12/reference/cli/check.md | 2 +- .../2.12/tasks/getting-per-route-metrics.md | 8 +++--- .../tasks/multicluster-using-statefulsets.md | 6 ++-- .../content/2.13/reference/cli/check.md | 2 +- .../2.13/tasks/getting-per-route-metrics.md | 2 +- .../tasks/multicluster-using-statefulsets.md | 6 ++-- .../content/2.13/tasks/troubleshooting.md | 14 +++++----- .../content/2.14/reference/cli/check.md | 2 +-
.../2.14/tasks/getting-per-route-metrics.md | 8 +++--- .../tasks/multicluster-using-statefulsets.md | 6 ++-- .../content/2.14/tasks/troubleshooting.md | 2 +- .../content/2.15/reference/cli/check.md | 2 +- .../content/2.15/tasks/distributed-tracing.md | 10 +++---- .../tasks/multicluster-using-statefulsets.md | 2 +- .../content/2.15/tasks/troubleshooting.md | 24 ++++++++-------- .../content/2.16/reference/cli/check.md | 2 +- .../tasks/multicluster-using-statefulsets.md | 6 ++-- .../content/2.16/tasks/troubleshooting.md | 26 ++++++++--------- .../content/2.17/reference/cli/check.md | 2 +- .../tasks/multicluster-using-statefulsets.md | 4 +-- .../content/2.17/tasks/troubleshooting.md | 26 ++++++++--------- .../content/2.18/reference/cli/check.md | 2 +- .../tasks/multicluster-using-statefulsets.md | 6 ++-- .../content/2.18/tasks/troubleshooting.md | 28 +++++++++---------- .../content/2.19/reference/cli/check.md | 2 +- .../tasks/multicluster-using-statefulsets.md | 2 +- .../content/2.19/tasks/troubleshooting.md | 26 ++++++++--------- 36 files changed, 137 insertions(+), 137 deletions(-) diff --git a/linkerd.io/content/2-edge/tasks/configuring-dynamic-request-routing.md b/linkerd.io/content/2-edge/tasks/configuring-dynamic-request-routing.md index a44d12a1a5..004b50ded6 100644 --- a/linkerd.io/content/2-edge/tasks/configuring-dynamic-request-routing.md +++ b/linkerd.io/content/2-edge/tasks/configuring-dynamic-request-routing.md @@ -67,7 +67,7 @@ Requests to `/echo` on port 9898 to the frontend pod will get forwarded the pod pointed by the Service `backend-a-podinfo`: ```bash -curl -sX POST localhost:9898/echo \ +$ curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' PODINFO_UI_MESSAGE=A backend @@ -132,7 +132,7 @@ the `backend-a-podinfo` Service. The previous requests should still reach `backend-a-podinfo` only: ```bash -curl -sX POST localhost:9898/echo \ +$ curl -sX POST localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. 
backend' PODINFO_UI_MESSAGE=A backend @@ -142,7 +142,7 @@ But if we add the `x-request-id: alternative` header, they get routed to `backend-b-podinfo`: ```bash -curl -sX POST \ +$ curl -sX POST \ -H 'x-request-id: alternative' \ localhost:9898/echo \ | grep -o 'PODINFO_UI_MESSAGE=. backend' diff --git a/linkerd.io/content/2-edge/tasks/troubleshooting.md b/linkerd.io/content/2-edge/tasks/troubleshooting.md index 2cd2baef60..1796e074f1 100644 --- a/linkerd.io/content/2-edge/tasks/troubleshooting.md +++ b/linkerd.io/content/2-edge/tasks/troubleshooting.md @@ -961,7 +961,7 @@ normally. Example failure: ```bash -linkerd check --proxy --namespace foo +$ linkerd check --proxy --namespace foo ... × data plane namespace exists The "foo" namespace does not exist @@ -1147,7 +1147,7 @@ Example error: Ensure that the linkerd-cni-config ConfigMap exists in the CNI namespace: ```bash -kubectl get cm linkerd-cni-config -n linkerd-cni +$ kubectl get cm linkerd-cni-config -n linkerd-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1172,7 +1172,7 @@ Example error: Ensure that the cluster role exists: ```bash -kubectl get clusterrole linkerd-cni +$ kubectl get clusterrole linkerd-cni NAME AGE linkerd-cni 54m ``` diff --git a/linkerd.io/content/2.10/reference/cli/check.md b/linkerd.io/content/2.10/reference/cli/check.md index 578e3722d4..312891f8ef 100644 --- a/linkerd.io/content/2.10/reference/cli/check.md +++ b/linkerd.io/content/2.10/reference/cli/check.md @@ -12,7 +12,7 @@ for a full list of all the possible checks, what they do and how to fix them.
## Example output ```bash -linkerd check +$ linkerd check kubernetes-api -------------- √ can initialize the client diff --git a/linkerd.io/content/2.10/tasks/getting-per-route-metrics.md b/linkerd.io/content/2.10/tasks/getting-per-route-metrics.md index 7d6120773c..5c29298281 100644 --- a/linkerd.io/content/2.10/tasks/getting-per-route-metrics.md +++ b/linkerd.io/content/2.10/tasks/getting-per-route-metrics.md @@ -14,7 +14,7 @@ For a tutorial that shows this functionality off, check out the You can view per-route metrics in the CLI by running `linkerd viz routes`: ```bash -linkerd viz routes svc/webapp +$ linkerd viz routes svc/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 GET / webapp 100.00% 0.6rps 25ms 30ms 30ms GET /authors/{id} webapp 100.00% 0.6rps 22ms 29ms 30ms @@ -34,7 +34,7 @@ specified in your service profile will end up there. It is also possible to look the metrics up by other resource types, such as: ```bash -linkerd viz routes deploy/webapp +$ linkerd viz routes deploy/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 [DEFAULT] kubernetes 0.00% 0.0rps 0ms 0ms 0ms GET / webapp 100.00% 0.5rps 27ms 38ms 40ms @@ -53,7 +53,7 @@ Then, it is possible to filter all the way down to requests going from a specific resource to other services: ```bash -linkerd viz routes deploy/webapp --to svc/books +$ linkerd viz routes deploy/webapp --to svc/books ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 DELETE /books/{id}.json books 100.00% 0.5rps 18ms 29ms 30ms GET /books.json books 100.00% 1.1rps 7ms 12ms 18ms @@ -70,7 +70,7 @@ If you're not seeing any metrics, there are two likely culprits. 
In both cases, the service points to, run: ```bash -linkerd viz tap deploy/webapp -o wide | grep req +$ linkerd viz tap deploy/webapp -o wide | grep req ``` A sample output is: diff --git a/linkerd.io/content/2.10/tasks/securing-your-cluster.md b/linkerd.io/content/2.10/tasks/securing-your-cluster.md index 6d67ae05a7..94d8f7dcc2 100644 --- a/linkerd.io/content/2.10/tasks/securing-your-cluster.md +++ b/linkerd.io/content/2.10/tasks/securing-your-cluster.md @@ -221,6 +221,6 @@ kubectl delete clusterrolebindings/linkerd-linkerd-viz-web-admin To confirm: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web no ``` diff --git a/linkerd.io/content/2.11/reference/cli/check.md b/linkerd.io/content/2.11/reference/cli/check.md index 578e3722d4..312891f8ef 100644 --- a/linkerd.io/content/2.11/reference/cli/check.md +++ b/linkerd.io/content/2.11/reference/cli/check.md @@ -12,7 +12,7 @@ for a full list of all the possible checks, what they do and how to fix them. 
## Example output ```bash -linkerd check +$ linkerd check kubernetes-api -------------- √ can initialize the client diff --git a/linkerd.io/content/2.11/tasks/getting-per-route-metrics.md b/linkerd.io/content/2.11/tasks/getting-per-route-metrics.md index e07c7de196..424ede9217 100644 --- a/linkerd.io/content/2.11/tasks/getting-per-route-metrics.md +++ b/linkerd.io/content/2.11/tasks/getting-per-route-metrics.md @@ -76,7 +76,7 @@ linkerd viz tap deploy/webapp -o wide | grep req A sample output is: ```bash -$ req id=3:1 proxy=in src=10.4.0.14:58562 dst=10.4.1.4:7000 tls=disabled :method=POST :authority=webapp:7000 :path=/books/24783/edit src_res=deploy/traffic src_ns=default dst_res=deploy/webapp dst_ns=default rt_route=POST /books/{id}/edit +req id=3:1 proxy=in src=10.4.0.14:58562 dst=10.4.1.4:7000 tls=disabled :method=POST :authority=webapp:7000 :path=/books/24783/edit src_res=deploy/traffic src_ns=default dst_res=deploy/webapp dst_ns=default rt_route=POST /books/{id}/edit ``` This will select only the requests observed and show the `:authority` and diff --git a/linkerd.io/content/2.11/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.11/tasks/multicluster-using-statefulsets.md index 009dfe24d8..687520a57d 100644 --- a/linkerd.io/content/2.11/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.11/tasks/multicluster-using-statefulsets.md @@ -178,7 +178,7 @@ nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s $ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh -/# prompt for curl pod +/$ prompt for curl pod ``` If we now curl one of these instances, we will get back a response. 
diff --git a/linkerd.io/content/2.11/tasks/securing-your-cluster.md b/linkerd.io/content/2.11/tasks/securing-your-cluster.md index 6c0efb9462..94d8f7dcc2 100644 --- a/linkerd.io/content/2.11/tasks/securing-your-cluster.md +++ b/linkerd.io/content/2.11/tasks/securing-your-cluster.md @@ -54,7 +54,7 @@ kubectl auth can-i watch deployments.tap.linkerd.io -n emojivoto --as $(whoami) You can also use the Linkerd CLI's `--as` flag to confirm: ```bash -linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) Cannot connect to Linkerd Viz: namespaces is forbidden: User "XXXX" cannot list resource "namespaces" in API group "" at the cluster scope Validate the install with: linkerd viz check ... @@ -71,7 +71,7 @@ To enable tap access to all resources in all namespaces, you may bind your user to the `linkerd-linkerd-tap-admin` ClusterRole, installed by default: ```bash -kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin +$ kubectl describe clusterroles/linkerd-linkerd-viz-tap-admin Name: linkerd-linkerd-viz-tap-admin Labels: component=tap linkerd.io/extension=viz @@ -103,7 +103,7 @@ kubectl create clusterrolebinding \ You can verify you now have tap access with: ```bash -linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) +$ linkerd viz tap -n linkerd deploy/linkerd-controller --as $(whoami) req id=3:0 proxy=in src=10.244.0.1:37392 dst=10.244.0.13:9996 tls=not_provided_by_remote :method=GET :authority=10.244.0.13:9996 :path=/ping ... ``` @@ -137,14 +137,14 @@ Because GCloud provides this additional level of access, there are cases where not. 
To validate this, check whether your GCloud user has Tap access: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces yes ``` And then validate whether your RBAC user has Tap access: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as $(gcloud config get-value account) no - no RBAC policy matched ``` @@ -181,14 +181,14 @@ privileges necessary to tap resources. To confirm: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web yes ``` This access is enabled via a `linkerd-linkerd-viz-web-admin` ClusterRoleBinding: ```bash -kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin +$ kubectl describe clusterrolebindings/linkerd-linkerd-viz-web-admin Name: linkerd-linkerd-viz-web-admin Labels: component=web linkerd.io/extensions=viz @@ -221,6 +221,6 @@ kubectl delete clusterrolebindings/linkerd-linkerd-viz-web-admin To confirm: ```bash -kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web +$ kubectl auth can-i watch pods.tap.linkerd.io --all-namespaces --as system:serviceaccount:linkerd-viz:web no ``` diff --git a/linkerd.io/content/2.12/reference/cli/check.md b/linkerd.io/content/2.12/reference/cli/check.md index 67a2486908..7cd61cd237 100644 --- a/linkerd.io/content/2.12/reference/cli/check.md +++ b/linkerd.io/content/2.12/reference/cli/check.md @@ -12,7 +12,7 @@ for a full list of all the possible checks, what they do and how to fix them. 
## Example output ```bash -linkerd check +$ linkerd check kubernetes-api -------------- √ can initialize the client diff --git a/linkerd.io/content/2.12/tasks/getting-per-route-metrics.md b/linkerd.io/content/2.12/tasks/getting-per-route-metrics.md index 9f66470e28..10e2beb9a1 100644 --- a/linkerd.io/content/2.12/tasks/getting-per-route-metrics.md +++ b/linkerd.io/content/2.12/tasks/getting-per-route-metrics.md @@ -24,7 +24,7 @@ per-route authorization. You can view per-route metrics in the CLI by running `linkerd viz routes`: ```bash -linkerd viz routes svc/webapp +$ linkerd viz routes svc/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 GET / webapp 100.00% 0.6rps 25ms 30ms 30ms GET /authors/{id} webapp 100.00% 0.6rps 22ms 29ms 30ms @@ -44,7 +44,7 @@ specified in your service profile will end up there. It is also possible to look the metrics up by other resource types, such as: ```bash -linkerd viz routes deploy/webapp +$ linkerd viz routes deploy/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 [DEFAULT] kubernetes 0.00% 0.0rps 0ms 0ms 0ms GET / webapp 100.00% 0.5rps 27ms 38ms 40ms @@ -63,7 +63,7 @@ Then, it is possible to filter all the way down to requests going from a specific resource to other services: ```bash -linkerd viz routes deploy/webapp --to svc/books +$ linkerd viz routes deploy/webapp --to svc/books ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 DELETE /books/{id}.json books 100.00% 0.5rps 18ms 29ms 30ms GET /books.json books 100.00% 1.1rps 7ms 12ms 18ms @@ -80,7 +80,7 @@ If you're not seeing any metrics, there are two likely culprits. 
In both cases, the service points to, run: ```bash -linkerd viz tap deploy/webapp -o wide | grep req +$ linkerd viz tap deploy/webapp -o wide | grep req ``` A sample output is: diff --git a/linkerd.io/content/2.12/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.12/tasks/multicluster-using-statefulsets.md index b4b6920aad..bc0108fa23 100644 --- a/linkerd.io/content/2.12/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.12/tasks/multicluster-using-statefulsets.md @@ -179,7 +179,7 @@ nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s $ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh -/# prompt for curl pod +/$ prompt for curl pod ``` If we now curl one of these instances, we will get back a response. @@ -235,7 +235,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname will point to one of the services we see above: ```sh -$ kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml +$ kubectl --context=k3d-east get endpoints nginx-svc-k3d-west -o yaml subsets: - addresses: - hostname: nginx-set-0 @@ -256,7 +256,7 @@ NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -$ kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh +$ kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. diff --git a/linkerd.io/content/2.13/reference/cli/check.md b/linkerd.io/content/2.13/reference/cli/check.md index 67a2486908..7cd61cd237 100644 --- a/linkerd.io/content/2.13/reference/cli/check.md +++ b/linkerd.io/content/2.13/reference/cli/check.md @@ -12,7 +12,7 @@ for a full list of all the possible checks, what they do and how to fix them. 
## Example output ```bash -linkerd check +$ linkerd check kubernetes-api -------------- √ can initialize the client diff --git a/linkerd.io/content/2.13/tasks/getting-per-route-metrics.md b/linkerd.io/content/2.13/tasks/getting-per-route-metrics.md index a38bdbea89..ddd2a4dc3c 100644 --- a/linkerd.io/content/2.13/tasks/getting-per-route-metrics.md +++ b/linkerd.io/content/2.13/tasks/getting-per-route-metrics.md @@ -86,7 +86,7 @@ linkerd viz tap deploy/webapp -o wide | grep req A sample output is: ```bash -$ req id=3:1 proxy=in src=10.4.0.14:58562 dst=10.4.1.4:7000 tls=disabled :method=POST :authority=webapp:7000 :path=/books/24783/edit src_res=deploy/traffic src_ns=default dst_res=deploy/webapp dst_ns=default rt_route=POST /books/{id}/edit +req id=3:1 proxy=in src=10.4.0.14:58562 dst=10.4.1.4:7000 tls=disabled :method=POST :authority=webapp:7000 :path=/books/24783/edit src_res=deploy/traffic src_ns=default dst_res=deploy/webapp dst_ns=default rt_route=POST /books/{id}/edit ``` This will select only the requests observed and show the `:authority` and diff --git a/linkerd.io/content/2.13/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.13/tasks/multicluster-using-statefulsets.md index 8a37486fea..6311ed9e02 100644 --- a/linkerd.io/content/2.13/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.13/tasks/multicluster-using-statefulsets.md @@ -179,7 +179,7 @@ nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s $ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh -/# prompt for curl pod +/$ prompt for curl pod ``` If we now curl one of these instances, we will get back a response. @@ -221,7 +221,7 @@ export the service. 
$ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled -kubectl --context=k3d-east get services +$ kubectl --context=k3d-east get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 20h nginx-svc-west ClusterIP None 80/TCP 29s @@ -256,7 +256,7 @@ NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -$ kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh +$ kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. diff --git a/linkerd.io/content/2.13/tasks/troubleshooting.md b/linkerd.io/content/2.13/tasks/troubleshooting.md index a635870d2f..e0bc46c57c 100644 --- a/linkerd.io/content/2.13/tasks/troubleshooting.md +++ b/linkerd.io/content/2.13/tasks/troubleshooting.md @@ -921,7 +921,7 @@ normally. Example failure: ```bash -linkerd check --proxy --namespace foo +$ linkerd check --proxy --namespace foo ... 
× data plane namespace exists The "foo" namespace does not exist @@ -1118,7 +1118,7 @@ Example error: Ensure that the linkerd-cni-config ConfigMap exists in the CNI namespace: ```bash -kubectl get cm linkerd-cni-config -n linkerd-cni +$ kubectl get cm linkerd-cni-config -n linkerd-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1126,7 +1126,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create ConfigMaps: ```bash -kubectl auth can-i create ConfigMaps +$ kubectl auth can-i create ConfigMaps yes ``` @@ -1143,7 +1143,7 @@ Example error: Ensure that the cluster role exists: ```bash -kubectl get clusterrole linkerd-cni +$ kubectl get clusterrole linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1151,7 +1151,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create ClusterRoles +$ kubectl auth can-i create ClusterRoles yes ``` @@ -1168,7 +1168,7 @@ Example error: Ensure that the cluster role binding exists: ```bash -kubectl get clusterrolebinding linkerd-cni +$ kubectl get clusterrolebinding linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1176,7 +1176,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create ClusterRoleBindings +$ kubectl auth can-i create ClusterRoleBindings yes ``` diff --git a/linkerd.io/content/2.14/reference/cli/check.md b/linkerd.io/content/2.14/reference/cli/check.md index 67a2486908..7cd61cd237 100644 --- a/linkerd.io/content/2.14/reference/cli/check.md +++ b/linkerd.io/content/2.14/reference/cli/check.md @@ -12,7 +12,7 @@ for a full list of all the possible checks, what they do and how to fix them. 
## Example output ```bash -linkerd check +$ linkerd check kubernetes-api -------------- √ can initialize the client diff --git a/linkerd.io/content/2.14/tasks/getting-per-route-metrics.md b/linkerd.io/content/2.14/tasks/getting-per-route-metrics.md index c2db8c0965..5e7b746581 100644 --- a/linkerd.io/content/2.14/tasks/getting-per-route-metrics.md +++ b/linkerd.io/content/2.14/tasks/getting-per-route-metrics.md @@ -24,7 +24,7 @@ per-route authorization. You can view per-route metrics in the CLI by running `linkerd viz routes`: ```bash -linkerd viz routes svc/webapp +$ linkerd viz routes svc/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 GET / webapp 100.00% 0.6rps 25ms 30ms 30ms GET /authors/{id} webapp 100.00% 0.6rps 22ms 29ms 30ms @@ -44,7 +44,7 @@ specified in your service profile will end up there. It is also possible to look the metrics up by other resource types, such as: ```bash -linkerd viz routes deploy/webapp +$ linkerd viz routes deploy/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 [DEFAULT] kubernetes 0.00% 0.0rps 0ms 0ms 0ms GET / webapp 100.00% 0.5rps 27ms 38ms 40ms @@ -63,7 +63,7 @@ Then, it is possible to filter all the way down to requests going from a specific resource to other services: ```bash -linkerd viz routes deploy/webapp --to svc/books +$ linkerd viz routes deploy/webapp --to svc/books ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 DELETE /books/{id}.json books 100.00% 0.5rps 18ms 29ms 30ms GET /books.json books 100.00% 1.1rps 7ms 12ms 18ms @@ -80,7 +80,7 @@ If you're not seeing any metrics, there are two likely culprits. 
In both cases, the service points to, run: ```bash -linkerd viz tap deploy/webapp -o wide | grep req +$ linkerd viz tap deploy/webapp -o wide | grep req ``` A sample output is: diff --git a/linkerd.io/content/2.14/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.14/tasks/multicluster-using-statefulsets.md index 7b4ad479c5..bc0108fa23 100644 --- a/linkerd.io/content/2.14/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.14/tasks/multicluster-using-statefulsets.md @@ -179,7 +179,7 @@ nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s $ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh -/# prompt for curl pod +/$ prompt for curl pod ``` If we now curl one of these instances, we will get back a response. @@ -221,7 +221,7 @@ export the service. $ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled -kubectl --context=k3d-east get services +$ kubectl --context=k3d-east get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 20h nginx-svc-west ClusterIP None 80/TCP 29s @@ -256,7 +256,7 @@ NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -$ kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh +$ kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. diff --git a/linkerd.io/content/2.14/tasks/troubleshooting.md b/linkerd.io/content/2.14/tasks/troubleshooting.md index 804a2382ee..3b7a9b17f5 100644 --- a/linkerd.io/content/2.14/tasks/troubleshooting.md +++ b/linkerd.io/content/2.14/tasks/troubleshooting.md @@ -921,7 +921,7 @@ normally. Example failure: ```bash -linkerd check --proxy --namespace foo +$ linkerd check --proxy --namespace foo ... 
× data plane namespace exists The "foo" namespace does not exist diff --git a/linkerd.io/content/2.15/reference/cli/check.md b/linkerd.io/content/2.15/reference/cli/check.md index 67a2486908..7cd61cd237 100644 --- a/linkerd.io/content/2.15/reference/cli/check.md +++ b/linkerd.io/content/2.15/reference/cli/check.md @@ -12,7 +12,7 @@ for a full list of all the possible checks, what they do and how to fix them. ## Example output ```bash -linkerd check +$ linkerd check kubernetes-api -------------- √ can initialize the client diff --git a/linkerd.io/content/2.15/tasks/distributed-tracing.md b/linkerd.io/content/2.15/tasks/distributed-tracing.md index 467947fd5b..9527f5d825 100644 --- a/linkerd.io/content/2.15/tasks/distributed-tracing.md +++ b/linkerd.io/content/2.15/tasks/distributed-tracing.md @@ -98,7 +98,7 @@ With `vote-bot` starting traces for every request, spans should now be showing up in Jaeger. To get to the UI, run: ```bash -$ linkerd jaeger dashboard +linkerd jaeger dashboard ``` ![Jaeger](/docs/images/tracing/jaeger-empty.png "Jaeger") @@ -144,8 +144,8 @@ To cleanup, uninstall the Linkerd-Jaeger extension along with emojivoto by running: ```bash -$ linkerd jaeger uninstall | kubectl delete -f - -$ kubectl delete ns emojivoto +linkerd jaeger uninstall | kubectl delete -f - +kubectl delete ns emojivoto ``` ## Bring your own Jaeger @@ -158,7 +158,7 @@ Create the following YAML file which disables the built in Jaeger instance and specifies the OpenCensus collector's config. 
```bash -$ cat < jaeger-linkerd.yaml +cat < jaeger-linkerd.yaml jaeger: enabled: false collector: @@ -193,7 +193,7 @@ collector: processors: [batch] exporters: [jaeger] EOF -$ linkerd jaeger install --values ./jaeger-linkerd.yaml | kubectl apply -f - +linkerd jaeger install --values ./jaeger-linkerd.yaml | kubectl apply -f - ``` You'll want to ensure that the `exporters.jaeger.endpoint` which is diff --git a/linkerd.io/content/2.15/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.15/tasks/multicluster-using-statefulsets.md index cd6104f42f..bc0108fa23 100644 --- a/linkerd.io/content/2.15/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.15/tasks/multicluster-using-statefulsets.md @@ -179,7 +179,7 @@ nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s $ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh -/# prompt for curl pod +/$ prompt for curl pod ``` If we now curl one of these instances, we will get back a response. diff --git a/linkerd.io/content/2.15/tasks/troubleshooting.md b/linkerd.io/content/2.15/tasks/troubleshooting.md index 194a7b5b8b..9873774220 100644 --- a/linkerd.io/content/2.15/tasks/troubleshooting.md +++ b/linkerd.io/content/2.15/tasks/troubleshooting.md @@ -961,7 +961,7 @@ normally. Example failure: ```bash -linkerd check --proxy --namespace foo +$ linkerd check --proxy --namespace foo ... 
× data plane namespace exists The "foo" namespace does not exist @@ -1133,7 +1133,7 @@ Example error: Ensure that the linkerd-cni-config ConfigMap exists in the CNI namespace: ```bash -kubectl get cm linkerd-cni-config -n linkerd-cni +$ kubectl get cm linkerd-cni-config -n linkerd-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1141,7 +1141,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create ConfigMaps: ```bash -kubectl auth can-i create ConfigMaps +$ kubectl auth can-i create ConfigMaps yes ``` @@ -1158,7 +1158,7 @@ Example error: Ensure that the cluster role exists: ```bash -kubectl get clusterrole linkerd-cni +$ kubectl get clusterrole linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1166,7 +1166,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create ClusterRoles +$ kubectl auth can-i create ClusterRoles yes ``` @@ -1183,7 +1183,7 @@ Example error: Ensure that the cluster role binding exists: ```bash -kubectl get clusterrolebinding linkerd-cni +$ kubectl get clusterrolebinding linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1191,7 +1191,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create ClusterRoleBindings +$ kubectl auth can-i create ClusterRoleBindings yes ``` @@ -1208,7 +1208,7 @@ Example error: Ensure that the CNI service account exists in the CNI namespace: ```bash -kubectl get ServiceAccount linkerd-cni -n linkerd-cni +$ kubectl get ServiceAccount linkerd-cni -n linkerd-cni NAME SECRETS AGE linkerd-cni 1 45m ``` @@ -1216,7 +1216,7 @@ linkerd-cni 1 45m Also ensure you have permission to create ServiceAccount: ```bash -kubectl auth can-i create ServiceAccounts -n linkerd-cni +$ kubectl auth can-i create ServiceAccounts -n linkerd-cni yes ``` @@ -1233,7 +1233,7 @@ 
Example error: Ensure that the CNI daemonset exists in the CNI namespace: ```bash -kubectl get ds -n linkerd-cni +$ kubectl get ds -n linkerd-cni NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE linkerd-cni 1 1 1 1 1 beta.kubernetes.io/os=linux 14m ``` @@ -1258,7 +1258,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -kubectl get po -n linkerd-cni +$ kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s @@ -1268,7 +1268,7 @@ linkerd-cni-p5670 1/1 Running 0 9m25s Ensure that all pods have finished the deployment of the CNI config and binary: ```bash -kubectl logs linkerd-cni-rzp2q -n linkerd-cni +$ kubectl logs linkerd-cni-rzp2q -n linkerd-cni Wrote linkerd CNI binaries to /host/opt/cni/bin Created CNI config /host/etc/cni/net.d/10-kindnet.conflist Done configuring CNI. Sleep=true diff --git a/linkerd.io/content/2.16/reference/cli/check.md b/linkerd.io/content/2.16/reference/cli/check.md index 67a2486908..7cd61cd237 100644 --- a/linkerd.io/content/2.16/reference/cli/check.md +++ b/linkerd.io/content/2.16/reference/cli/check.md @@ -12,7 +12,7 @@ for a full list of all the possible checks, what they do and how to fix them. ## Example output ```bash -linkerd check +$ linkerd check kubernetes-api -------------- √ can initialize the client diff --git a/linkerd.io/content/2.16/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.16/tasks/multicluster-using-statefulsets.md index 729e6c13c0..9a143a2bce 100644 --- a/linkerd.io/content/2.16/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.16/tasks/multicluster-using-statefulsets.md @@ -179,7 +179,7 @@ nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s $ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh -/# prompt for curl pod +/$ prompt for curl pod ``` If we now curl one of these instances, we will get back a response. 
@@ -221,7 +221,7 @@ export the service. $ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled -kubectl --context=k3d-east get services +$ kubectl --context=k3d-east get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 20h nginx-svc-west ClusterIP None 80/TCP 29s @@ -256,7 +256,7 @@ NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh +$ kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. diff --git a/linkerd.io/content/2.16/tasks/troubleshooting.md b/linkerd.io/content/2.16/tasks/troubleshooting.md index f18644ca83..6f37f2e24e 100644 --- a/linkerd.io/content/2.16/tasks/troubleshooting.md +++ b/linkerd.io/content/2.16/tasks/troubleshooting.md @@ -961,7 +961,7 @@ normally. Example failure: ```bash -linkerd check --proxy --namespace foo +$ linkerd check --proxy --namespace foo ... 
× data plane namespace exists The "foo" namespace does not exist @@ -1147,7 +1147,7 @@ Example error: Ensure that the linkerd-cni-config ConfigMap exists in the CNI namespace: ```bash -kubectl get cm linkerd-cni-config -n linkerd-cni +$ kubectl get cm linkerd-cni-config -n linkerd-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1155,7 +1155,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create ConfigMaps: ```bash -kubectl auth can-i create ConfigMaps +$ kubectl auth can-i create ConfigMaps yes ``` @@ -1172,7 +1172,7 @@ Example error: Ensure that the cluster role exists: ```bash -kubectl get clusterrole linkerd-cni +$ kubectl get clusterrole linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1180,7 +1180,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create ClusterRoles +$ kubectl auth can-i create ClusterRoles yes ``` @@ -1197,7 +1197,7 @@ Example error: Ensure that the cluster role binding exists: ```bash -kubectl get clusterrolebinding linkerd-cni +$ kubectl get clusterrolebinding linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1205,7 +1205,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create ClusterRoleBindings +$ kubectl auth can-i create ClusterRoleBindings yes ``` @@ -1222,7 +1222,7 @@ Example error: Ensure that the CNI service account exists in the CNI namespace: ```bash -kubectl get ServiceAccount linkerd-cni -n linkerd-cni +$ kubectl get ServiceAccount linkerd-cni -n linkerd-cni NAME SECRETS AGE linkerd-cni 1 45m ``` @@ -1230,7 +1230,7 @@ linkerd-cni 1 45m Also ensure you have permission to create ServiceAccount: ```bash -kubectl auth can-i create ServiceAccounts -n linkerd-cni +$ kubectl auth can-i create ServiceAccounts -n linkerd-cni yes ``` @@ -1247,7 +1247,7 @@ 
Example error: Ensure that the CNI daemonset exists in the CNI namespace: ```bash -kubectl get ds -n linkerd-cni +$ kubectl get ds -n linkerd-cni NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE linkerd-cni 1 1 1 1 1 beta.kubernetes.io/os=linux 14m ``` @@ -1255,7 +1255,7 @@ linkerd-cni 1 1 1 1 1 beta.kubernet Also ensure you have permission to create DaemonSets: ```bash -kubectl auth can-i create DaemonSets -n linkerd-cni +$ kubectl auth can-i create DaemonSets -n linkerd-cni yes ``` @@ -1272,7 +1272,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -kubectl get po -n linkerd-cni +$ kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s @@ -1282,7 +1282,7 @@ linkerd-cni-p5670 1/1 Running 0 9m25s Ensure that all pods have finished the deployment of the CNI config and binary: ```bash -kubectl logs linkerd-cni-rzp2q -n linkerd-cni +$ kubectl logs linkerd-cni-rzp2q -n linkerd-cni Wrote linkerd CNI binaries to /host/opt/cni/bin Created CNI config /host/etc/cni/net.d/10-kindnet.conflist Done configuring CNI. Sleep=true diff --git a/linkerd.io/content/2.17/reference/cli/check.md b/linkerd.io/content/2.17/reference/cli/check.md index 67a2486908..7cd61cd237 100644 --- a/linkerd.io/content/2.17/reference/cli/check.md +++ b/linkerd.io/content/2.17/reference/cli/check.md @@ -12,7 +12,7 @@ for a full list of all the possible checks, what they do and how to fix them. 
## Example output ```bash -linkerd check +$ linkerd check kubernetes-api -------------- √ can initialize the client diff --git a/linkerd.io/content/2.17/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.17/tasks/multicluster-using-statefulsets.md index 55762acf29..bc0108fa23 100644 --- a/linkerd.io/content/2.17/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.17/tasks/multicluster-using-statefulsets.md @@ -179,7 +179,7 @@ nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s $ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh -/# prompt for curl pod +/$ prompt for curl pod ``` If we now curl one of these instances, we will get back a response. @@ -221,7 +221,7 @@ export the service. $ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled -kubectl --context=k3d-east get services +$ kubectl --context=k3d-east get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 20h nginx-svc-west ClusterIP None 80/TCP 29s diff --git a/linkerd.io/content/2.17/tasks/troubleshooting.md b/linkerd.io/content/2.17/tasks/troubleshooting.md index c0dcb7b154..c4a4407e90 100644 --- a/linkerd.io/content/2.17/tasks/troubleshooting.md +++ b/linkerd.io/content/2.17/tasks/troubleshooting.md @@ -961,7 +961,7 @@ normally. Example failure: ```bash -linkerd check --proxy --namespace foo +$ linkerd check --proxy --namespace foo ... 
× data plane namespace exists The "foo" namespace does not exist @@ -1147,7 +1147,7 @@ Example error: Ensure that the linkerd-cni-config ConfigMap exists in the CNI namespace: ```bash -kubectl get cm linkerd-cni-config -n linkerd-cni +$ kubectl get cm linkerd-cni-config -n linkerd-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1155,7 +1155,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create ConfigMaps: ```bash -kubectl auth can-i create ConfigMaps +$ kubectl auth can-i create ConfigMaps yes ``` @@ -1172,7 +1172,7 @@ Example error: Ensure that the cluster role exists: ```bash -kubectl get clusterrole linkerd-cni +$ kubectl get clusterrole linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1180,7 +1180,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create ClusterRoles +$ kubectl auth can-i create ClusterRoles yes ``` @@ -1197,7 +1197,7 @@ Example error: Ensure that the cluster role binding exists: ```bash -kubectl get clusterrolebinding linkerd-cni +$ kubectl get clusterrolebinding linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1205,7 +1205,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create ClusterRoleBindings +$ kubectl auth can-i create ClusterRoleBindings yes ``` @@ -1222,7 +1222,7 @@ Example error: Ensure that the CNI service account exists in the CNI namespace: ```bash -kubectl get ServiceAccount linkerd-cni -n linkerd-cni +$ kubectl get ServiceAccount linkerd-cni -n linkerd-cni NAME SECRETS AGE linkerd-cni 1 45m ``` @@ -1230,7 +1230,7 @@ linkerd-cni 1 45m Also ensure you have permission to create ServiceAccount: ```bash -kubectl auth can-i create ServiceAccounts -n linkerd-cni +$ kubectl auth can-i create ServiceAccounts -n linkerd-cni yes ``` @@ -1247,7 +1247,7 @@ 
Example error: Ensure that the CNI daemonset exists in the CNI namespace: ```bash -kubectl get ds -n linkerd-cni +$ kubectl get ds -n linkerd-cni NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE linkerd-cni 1 1 1 1 1 beta.kubernetes.io/os=linux 14m ``` @@ -1255,7 +1255,7 @@ linkerd-cni 1 1 1 1 1 beta.kubernet Also ensure you have permission to create DaemonSets: ```bash -kubectl auth can-i create DaemonSets -n linkerd-cni +$ kubectl auth can-i create DaemonSets -n linkerd-cni yes ``` @@ -1272,7 +1272,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -kubectl get po -n linkerd-cni +$ kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s @@ -1282,7 +1282,7 @@ linkerd-cni-p5670 1/1 Running 0 9m25s Ensure that all pods have finished the deployment of the CNI config and binary: ```bash -kubectl logs linkerd-cni-rzp2q -n linkerd-cni +$ kubectl logs linkerd-cni-rzp2q -n linkerd-cni Wrote linkerd CNI binaries to /host/opt/cni/bin Created CNI config /host/etc/cni/net.d/10-kindnet.conflist Done configuring CNI. Sleep=true diff --git a/linkerd.io/content/2.18/reference/cli/check.md b/linkerd.io/content/2.18/reference/cli/check.md index 67a2486908..7cd61cd237 100644 --- a/linkerd.io/content/2.18/reference/cli/check.md +++ b/linkerd.io/content/2.18/reference/cli/check.md @@ -12,7 +12,7 @@ for a full list of all the possible checks, what they do and how to fix them. 
## Example output ```bash -linkerd check +$ linkerd check kubernetes-api -------------- √ can initialize the client diff --git a/linkerd.io/content/2.18/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.18/tasks/multicluster-using-statefulsets.md index f193db3a51..eace8623fa 100644 --- a/linkerd.io/content/2.18/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.18/tasks/multicluster-using-statefulsets.md @@ -178,7 +178,7 @@ nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s $ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh -/# prompt for curl pod +/$ prompt for curl pod ``` If we now curl one of these instances, we will get back a response. @@ -220,7 +220,7 @@ export the service. $ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled -kubectl --context=k3d-east get services +$ kubectl --context=k3d-east get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 20h nginx-svc-west ClusterIP None 80/TCP 29s @@ -255,7 +255,7 @@ NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh +$ kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. diff --git a/linkerd.io/content/2.18/tasks/troubleshooting.md b/linkerd.io/content/2.18/tasks/troubleshooting.md index a1d30e5a89..ac4b53e57d 100644 --- a/linkerd.io/content/2.18/tasks/troubleshooting.md +++ b/linkerd.io/content/2.18/tasks/troubleshooting.md @@ -961,7 +961,7 @@ normally. Example failure: ```bash -linkerd check --proxy --namespace foo +$ linkerd check --proxy --namespace foo ... 
× data plane namespace exists The "foo" namespace does not exist @@ -1108,7 +1108,7 @@ extension binaries implement it. For more information, See Example error: ```bash -invalid extension check output from \"jaeger\" (JSON object expected) +invalid extension check output from \"viz\" (JSON object expected) ``` Make sure that the extension binary implements `check --output json` which @@ -1147,7 +1147,7 @@ Example error: Ensure that the linkerd-cni-config ConfigMap exists in the CNI namespace: ```bash -kubectl get cm linkerd-cni-config -n linkerd-cni +$ kubectl get cm linkerd-cni-config -n linkerd-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1155,7 +1155,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create ConfigMaps: ```bash -kubectl auth can-i create ConfigMaps +$ kubectl auth can-i create ConfigMaps yes ``` @@ -1172,7 +1172,7 @@ Example error: Ensure that the cluster role exists: ```bash -kubectl get clusterrole linkerd-cni +$ kubectl get clusterrole linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1180,7 +1180,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create ClusterRoles +$ kubectl auth can-i create ClusterRoles yes ``` @@ -1197,7 +1197,7 @@ Example error: Ensure that the cluster role binding exists: ```bash -kubectl get clusterrolebinding linkerd-cni +$ kubectl get clusterrolebinding linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1205,7 +1205,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create ClusterRoleBindings +$ kubectl auth can-i create ClusterRoleBindings yes ``` @@ -1222,7 +1222,7 @@ Example error: Ensure that the CNI service account exists in the CNI namespace: ```bash -kubectl get ServiceAccount linkerd-cni -n linkerd-cni +$ kubectl get 
ServiceAccount linkerd-cni -n linkerd-cni NAME SECRETS AGE linkerd-cni 1 45m ``` @@ -1230,7 +1230,7 @@ linkerd-cni 1 45m Also ensure you have permission to create ServiceAccount: ```bash -kubectl auth can-i create ServiceAccounts -n linkerd-cni +$ kubectl auth can-i create ServiceAccounts -n linkerd-cni yes ``` @@ -1247,7 +1247,7 @@ Example error: Ensure that the CNI daemonset exists in the CNI namespace: ```bash -kubectl get ds -n linkerd-cni +$ kubectl get ds -n linkerd-cni NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE linkerd-cni 1 1 1 1 1 beta.kubernetes.io/os=linux 14m ``` @@ -1255,7 +1255,7 @@ linkerd-cni 1 1 1 1 1 beta.kubernet Also ensure you have permission to create DaemonSets: ```bash -kubectl auth can-i create DaemonSets -n linkerd-cni +$ kubectl auth can-i create DaemonSets -n linkerd-cni yes ``` @@ -1272,7 +1272,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -kubectl get po -n linkerd-cni +$ kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s @@ -1282,7 +1282,7 @@ linkerd-cni-p5670 1/1 Running 0 9m25s Ensure that all pods have finished the deployment of the CNI config and binary: ```bash -kubectl logs linkerd-cni-rzp2q -n linkerd-cni +$ kubectl logs linkerd-cni-rzp2q -n linkerd-cni Wrote linkerd CNI binaries to /host/opt/cni/bin Created CNI config /host/etc/cni/net.d/10-kindnet.conflist Done configuring CNI. Sleep=true diff --git a/linkerd.io/content/2.19/reference/cli/check.md b/linkerd.io/content/2.19/reference/cli/check.md index 67a2486908..7cd61cd237 100644 --- a/linkerd.io/content/2.19/reference/cli/check.md +++ b/linkerd.io/content/2.19/reference/cli/check.md @@ -12,7 +12,7 @@ for a full list of all the possible checks, what they do and how to fix them. 
## Example output ```bash -linkerd check +$ linkerd check kubernetes-api -------------- √ can initialize the client diff --git a/linkerd.io/content/2.19/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.19/tasks/multicluster-using-statefulsets.md index ef1f3dff24..eace8623fa 100644 --- a/linkerd.io/content/2.19/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.19/tasks/multicluster-using-statefulsets.md @@ -178,7 +178,7 @@ nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s $ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh -/# prompt for curl pod +/$ prompt for curl pod ``` If we now curl one of these instances, we will get back a response. diff --git a/linkerd.io/content/2.19/tasks/troubleshooting.md b/linkerd.io/content/2.19/tasks/troubleshooting.md index f18644ca83..6f37f2e24e 100644 --- a/linkerd.io/content/2.19/tasks/troubleshooting.md +++ b/linkerd.io/content/2.19/tasks/troubleshooting.md @@ -961,7 +961,7 @@ normally. Example failure: ```bash -linkerd check --proxy --namespace foo +$ linkerd check --proxy --namespace foo ... 
× data plane namespace exists The "foo" namespace does not exist @@ -1147,7 +1147,7 @@ Example error: Ensure that the linkerd-cni-config ConfigMap exists in the CNI namespace: ```bash -kubectl get cm linkerd-cni-config -n linkerd-cni +$ kubectl get cm linkerd-cni-config -n linkerd-cni NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAsAny false hostPath,secret ``` @@ -1155,7 +1155,7 @@ linkerd-linkerd-cni-cni false RunAsAny RunAsAny RunAsAny RunAs Also ensure you have permission to create ConfigMaps: ```bash -kubectl auth can-i create ConfigMaps +$ kubectl auth can-i create ConfigMaps yes ``` @@ -1172,7 +1172,7 @@ Example error: Ensure that the cluster role exists: ```bash -kubectl get clusterrole linkerd-cni +$ kubectl get clusterrole linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1180,7 +1180,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create ClusterRoles +$ kubectl auth can-i create ClusterRoles yes ``` @@ -1197,7 +1197,7 @@ Example error: Ensure that the cluster role binding exists: ```bash -kubectl get clusterrolebinding linkerd-cni +$ kubectl get clusterrolebinding linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1205,7 +1205,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create ClusterRoleBindings +$ kubectl auth can-i create ClusterRoleBindings yes ``` @@ -1222,7 +1222,7 @@ Example error: Ensure that the CNI service account exists in the CNI namespace: ```bash -kubectl get ServiceAccount linkerd-cni -n linkerd-cni +$ kubectl get ServiceAccount linkerd-cni -n linkerd-cni NAME SECRETS AGE linkerd-cni 1 45m ``` @@ -1230,7 +1230,7 @@ linkerd-cni 1 45m Also ensure you have permission to create ServiceAccount: ```bash -kubectl auth can-i create ServiceAccounts -n linkerd-cni +$ kubectl auth can-i create ServiceAccounts -n linkerd-cni yes ``` @@ -1247,7 +1247,7 @@ 
Example error: Ensure that the CNI daemonset exists in the CNI namespace: ```bash -kubectl get ds -n linkerd-cni +$ kubectl get ds -n linkerd-cni NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE linkerd-cni 1 1 1 1 1 beta.kubernetes.io/os=linux 14m ``` @@ -1255,7 +1255,7 @@ linkerd-cni 1 1 1 1 1 beta.kubernet Also ensure you have permission to create DaemonSets: ```bash -kubectl auth can-i create DaemonSets -n linkerd-cni +$ kubectl auth can-i create DaemonSets -n linkerd-cni yes ``` @@ -1272,7 +1272,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -kubectl get po -n linkerd-cni +$ kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s @@ -1282,7 +1282,7 @@ linkerd-cni-p5670 1/1 Running 0 9m25s Ensure that all pods have finished the deployment of the CNI config and binary: ```bash -kubectl logs linkerd-cni-rzp2q -n linkerd-cni +$ kubectl logs linkerd-cni-rzp2q -n linkerd-cni Wrote linkerd CNI binaries to /host/opt/cni/bin Created CNI config /host/etc/cni/net.d/10-kindnet.conflist Done configuring CNI. 
Sleep=true From 394b88f78f3849885b7da0eca02ab9f4f36e855d Mon Sep 17 00:00:00 2001 From: bezarsnba Date: Sat, 31 Jan 2026 18:23:29 -0300 Subject: [PATCH 16/31] revision files Signed-off-by: bezarsnba --- .../content/2-edge/tasks/troubleshooting.md | 26 +-- .../2.10/tasks/getting-per-route-metrics.md | 2 +- .../tasks/rotating_webhooks_certificates.md | 20 +-- .../tasks/multicluster-using-statefulsets.md | 13 +- .../tasks/multicluster-using-statefulsets.md | 6 +- .../tasks/multicluster-using-statefulsets.md | 10 +- .../content/2.13/tasks/troubleshooting.md | 32 ++-- .../tasks/multicluster-using-statefulsets.md | 8 +- .../tasks/multicluster-using-statefulsets.md | 8 +- .../content/2.15/tasks/troubleshooting.md | 10 +- .../tasks/multicluster-using-statefulsets.md | 8 +- .../content/2.16/tasks/troubleshooting.md | 159 +++++++++++------- .../tasks/multicluster-using-statefulsets.md | 8 +- .../content/2.17/tasks/troubleshooting.md | 54 ++---- .../content/2.18/tasks/troubleshooting.md | 20 +-- .../content/2.19/tasks/troubleshooting.md | 24 +-- 16 files changed, 204 insertions(+), 204 deletions(-) diff --git a/linkerd.io/content/2-edge/tasks/troubleshooting.md b/linkerd.io/content/2-edge/tasks/troubleshooting.md index 1796e074f1..ac3456634b 100644 --- a/linkerd.io/content/2-edge/tasks/troubleshooting.md +++ b/linkerd.io/content/2-edge/tasks/troubleshooting.md @@ -1172,7 +1172,7 @@ Example error: Ensure that the cluster role exists: ```bash -$ $ kubectl get clusterrole linkerd-cni +$ kubectl get clusterrole linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1180,7 +1180,7 @@ linkerd-cni 54m Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create ClusterRoles +$ kubectl auth can-i create ClusterRoles yes ``` @@ -1197,7 +1197,7 @@ Example error: Ensure that the cluster role binding exists: ```bash -kubectl get clusterrolebinding linkerd-cni +$ kubectl get clusterrolebinding linkerd-cni NAME AGE linkerd-cni 54m ``` @@ -1205,7 +1205,7 @@ linkerd-cni 
54m Also ensure you have permission to create ClusterRoleBindings: ```bash -kubectl auth can-i create ClusterRoleBindings +$ kubectl auth can-i create ClusterRoleBindings yes ``` @@ -1222,7 +1222,7 @@ Example error: Ensure that the CNI service account exists in the CNI namespace: ```bash -kubectl get ServiceAccount linkerd-cni -n linkerd-cni +$ kubectl get ServiceAccount linkerd-cni -n linkerd-cni NAME SECRETS AGE linkerd-cni 1 45m ``` @@ -1230,7 +1230,7 @@ linkerd-cni 1 45m Also ensure you have permission to create ServiceAccount: ```bash -kubectl auth can-i create ServiceAccounts -n linkerd-cni +$ kubectl auth can-i create ServiceAccounts -n linkerd-cni yes ``` @@ -1247,7 +1247,7 @@ Example error: Ensure that the CNI daemonset exists in the CNI namespace: ```bash -kubectl get ds -n linkerd-cni +$ kubectl get ds -n linkerd-cni NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE linkerd-cni 1 1 1 1 1 beta.kubernetes.io/os=linux 14m ``` @@ -1255,7 +1255,7 @@ linkerd-cni 1 1 1 1 1 beta.kubernet Also ensure you have permission to create DaemonSets: ```bash -kubectl auth can-i create DaemonSets -n linkerd-cni +$ kubectl auth can-i create DaemonSets -n linkerd-cni yes ``` @@ -1272,7 +1272,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -kubectl get po -n linkerd-cni +$ kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s @@ -1282,7 +1282,7 @@ linkerd-cni-p5670 1/1 Running 0 9m25s Ensure that all pods have finished the deployment of the CNI config and binary: ```bash -kubectl logs linkerd-cni-rzp2q -n linkerd-cni +$ kubectl logs linkerd-cni-rzp2q -n linkerd-cni Wrote linkerd CNI binaries to /host/opt/cni/bin Created CNI config /host/etc/cni/net.d/10-kindnet.conflist Done configuring CNI. 
Sleep=true @@ -1433,7 +1433,7 @@ rules: Expected rules for `linkerd-service-mirror-read-remote-creds` role: ```bash -kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml +$ kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml kind: Role metadata: labels: @@ -1623,7 +1623,7 @@ linkerd-linkerd-viz-web-check 2021-01-2 Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create clusterroles +$ kubectl auth can-i create clusterroles yes ``` @@ -2009,7 +2009,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +$ kubectl auth can-i create ClusterRoles yes ``` diff --git a/linkerd.io/content/2.10/tasks/getting-per-route-metrics.md b/linkerd.io/content/2.10/tasks/getting-per-route-metrics.md index 5c29298281..424ede9217 100644 --- a/linkerd.io/content/2.10/tasks/getting-per-route-metrics.md +++ b/linkerd.io/content/2.10/tasks/getting-per-route-metrics.md @@ -70,7 +70,7 @@ If you're not seeing any metrics, there are two likely culprits. 
In both cases, the service points to, run: ```bash -$ linkerd viz tap deploy/webapp -o wide | grep req +linkerd viz tap deploy/webapp -o wide | grep req ``` A sample output is: diff --git a/linkerd.io/content/2.10/tasks/rotating_webhooks_certificates.md b/linkerd.io/content/2.10/tasks/rotating_webhooks_certificates.md index 9585798983..bfa5b61e2d 100644 --- a/linkerd.io/content/2.10/tasks/rotating_webhooks_certificates.md +++ b/linkerd.io/content/2.10/tasks/rotating_webhooks_certificates.md @@ -55,9 +55,9 @@ for idx in "${!SECRETS[@]}"; do \ kubectl -n "${NS[$idx]}" delete secret "${SECRETS[$idx]}"; \ done -$ linkerd upgrade | kubectl apply -f - -$ linkerd viz install | kubectl apply -f - -$ linkerd jaeger install | kubectl apply -f - +linkerd upgrade | kubectl apply -f - +linkerd viz install | kubectl apply -f - +linkerd jaeger install | kubectl apply -f - ``` The above command will recreate the secrets without restarting Linkerd. @@ -74,7 +74,6 @@ they wil be overwritten by a new cert and key generated by the helm chart. 
Confirm that the secrets are recreated with new certificates: - ```bash for idx in "${!SECRETS[@]}"; do \ kubectl -n "${NS[$idx]}" get secret "${SECRETS[$idx]}" -ojsonpath='{.data.crt\.pem}' | \ @@ -86,9 +85,8 @@ done Ensure that Linkerd remains healthy: - ```bash -$ linkerd check +linkerd check ``` Restarting the pods that implement the webhooks and API services is usually not @@ -99,10 +97,10 @@ If you observe certificate expiry errors or mismatched CA certs, restart their pods with: ```sh -$ kubectl -n linkerd rollout restart deploy \ - linkerd-proxy-injector \ - linkerd-sp-validator \ +kubectl -n linkerd rollout restart deploy \ + linkerd-proxy-injector \ + linkerd-sp-validator \ -$ kubectl -n linkerd-viz rollout restart deploy tap tap-injector -$ kubectl -n linkerd-jaeger rollout restart deploy jaeger-injector +kubectl -n linkerd-viz rollout restart deploy tap tap-injector +kubectl -n linkerd-jaeger rollout restart deploy jaeger-injector ``` diff --git a/linkerd.io/content/2.11/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.11/tasks/multicluster-using-statefulsets.md index 687520a57d..bf17be188e 100644 --- a/linkerd.io/content/2.11/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.11/tasks/multicluster-using-statefulsets.md @@ -71,9 +71,10 @@ west 1/1 0/0 true Once our clusters are created, we will install Linkerd and the multi-cluster extension. Finally, once both are installed, we need to link the two clusters -together so their services may be mirrored. As before, these steps are automated -through the provided scripts; please give them a look and see how the -controllers and links are generated for both clusters. +together so their services may be mirrored. To enable support for headless +services, we will pass an additional `--set "enableHeadlessServices=true"` flag +to `linkerd multicluster link`. As before, these steps are automated through the +provided scripts, but feel free to have a look! 
```sh # Install Linkerd and multicluster, output to check should be a success @@ -178,7 +179,7 @@ nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s $ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh -/$ prompt for curl pod +/$ # prompt for curl pod ``` If we now curl one of these instances, we will get back a response. @@ -220,7 +221,7 @@ export the service. $ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled -kubectl --context=k3d-east get services +$ kubectl --context=k3d-east get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 20h nginx-svc-west ClusterIP None 80/TCP 29s @@ -260,7 +261,7 @@ $ kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/s # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. # -$ curl nginx-set-0.nginx-svc-k3d-west.default.svc.east.cluster.local +$ curl nginx-set-0.nginx-svc-west.default.svc.east.cluster.local diff --git a/linkerd.io/content/2.12/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.12/tasks/multicluster-using-statefulsets.md index bc0108fa23..544a1324cb 100644 --- a/linkerd.io/content/2.12/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.12/tasks/multicluster-using-statefulsets.md @@ -179,7 +179,7 @@ nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s $ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh -/$ prompt for curl pod +/$ # prompt for curl pod ``` If we now curl one of these instances, we will get back a response.
@@ -235,7 +235,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname will point to one of the services we see above: ```sh -$ kubectl --context=k3d-east get endpoints nginx-svc-k3d-west -o yaml +$ kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml subsets: - addresses: - hostname: nginx-set-0 @@ -256,7 +256,7 @@ NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -$ kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh +$ kubectl --context=k3d-east exec pod/curl-56dc7d945d-96r6p -it -c curl -- /bin/sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. diff --git a/linkerd.io/content/2.13/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.13/tasks/multicluster-using-statefulsets.md index 6311ed9e02..544a1324cb 100644 --- a/linkerd.io/content/2.13/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.13/tasks/multicluster-using-statefulsets.md @@ -48,8 +48,8 @@ The first step is to clone the demo repository on your local machine. ```sh # clone example repository -$ git clone git@github.com:mateiidavid/l2d-k3d-statefulset.git -$ cd l2d-k3d-statefulset +git clone git@github.com:mateiidavid/l2d-k3d-statefulset.git +cd l2d-k3d-statefulset ``` The second step consists of creating two `k3d` clusters named `east` and `west`, @@ -179,7 +179,7 @@ nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s $ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh -/$ prompt for curl pod +/$ # prompt for curl pod ``` If we now curl one of these instances, we will get back a response.
@@ -235,7 +235,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname will point to one of the services we see above: ```sh -$ kubectl --context=k3d-east get endpoints nginx-svc-k3d-west -o yaml +$ kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml subsets: - addresses: - hostname: nginx-set-0 @@ -256,7 +256,7 @@ NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -$ kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh +$ kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. diff --git a/linkerd.io/content/2.13/tasks/troubleshooting.md b/linkerd.io/content/2.13/tasks/troubleshooting.md index e0bc46c57c..7ed6aaf5d1 100644 --- a/linkerd.io/content/2.13/tasks/troubleshooting.md +++ b/linkerd.io/content/2.13/tasks/troubleshooting.md @@ -1045,7 +1045,7 @@ Ensure the kube-system namespace has the `config.linkerd.io/admission-webhooks:disabled` label: ```bash -kubectl get namespace kube-system -oyaml +$ kubectl get namespace kube-system -oyaml kind: Namespace apiVersion: v1 metadata: @@ -1193,7 +1193,7 @@ Example error: Ensure that the CNI service account exists in the CNI namespace: ```bash -kubectl get ServiceAccount linkerd-cni -n linkerd-cni +$ kubectl get ServiceAccount linkerd-cni -n linkerd-cni NAME SECRETS AGE linkerd-cni 1 45m ``` @@ -1201,7 +1201,7 @@ linkerd-cni 1 45m Also ensure you have permission to create ServiceAccount: ```bash -kubectl auth can-i create ServiceAccounts -n linkerd-cni +$ kubectl auth can-i create ServiceAccounts -n linkerd-cni yes ``` @@ -1218,7 +1218,7 @@ Example error: Ensure that the CNI daemonset exists in the CNI namespace: ```bash -kubectl get ds -n linkerd-cni +$ kubectl get ds -n linkerd-cni NAME DESIRED CURRENT READY UP-TO-DATE 
AVAILABLE NODE SELECTOR AGE linkerd-cni 1 1 1 1 1 beta.kubernetes.io/os=linux 14m ``` @@ -1226,7 +1226,7 @@ linkerd-cni 1 1 1 1 1 beta.kubernet Also ensure you have permission to create DaemonSets: ```bash -kubectl auth can-i create DaemonSets -n linkerd-cni +$ kubectl auth can-i create DaemonSets -n linkerd-cni yes ``` @@ -1243,7 +1243,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -kubectl get po -n linkerd-cni +$ kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s @@ -1253,7 +1253,7 @@ linkerd-cni-p5670 1/1 Running 0 9m25s Ensure that all pods have finished the deployment of the CNI config and binary: ```bash -kubectl logs linkerd-cni-rzp2q -n linkerd-cni +$ kubectl logs linkerd-cni-rzp2q -n linkerd-cni Wrote linkerd CNI binaries to /host/opt/cni/bin Created CNI config /host/etc/cni/net.d/10-kindnet.conflist Done configuring CNI. Sleep=true @@ -1393,7 +1393,7 @@ rules: Expected rules for `linkerd-service-mirror-read-remote-creds` role: ```bash -kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml +$ kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml kind: Role metadata: labels: @@ -1555,7 +1555,7 @@ linkerd-linkerd-viz-web-check 2021-01-2 Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create clusterroles +$ kubectl auth can-i create clusterroles yes ``` @@ -2023,7 +2023,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +$ kubectl auth can-i create ClusterRoles yes ``` @@ -2107,14 +2107,14 @@ yes Ensure the `buoyant-cloud-agent` Deployment exists: ```bash -$ kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent +kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent ``` If the Deployment does not exist, the 
`linkerd-buoyant` installation may be missing or incomplete. To reinstall the extension: ```bash -$ linkerd-buoyant install | kubectl apply -f - +linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running @@ -2180,7 +2180,7 @@ Agent version: v0.4.4 To update to the latest version: ```bash -$ linkerd-buoyant install | kubectl apply -f - +linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running a single pod @@ -2194,7 +2194,7 @@ $ linkerd-buoyant install | kubectl apply -f - `buoyant-cloud-agent` should run as a singleton. Check for other pods: ```bash -$ kubectl get po -A --selector app=buoyant-cloud-agent +kubectl get po -A --selector app=buoyant-cloud-agent ``` ### √ buoyant-cloud-metrics DaemonSet exists @@ -2208,14 +2208,14 @@ $ kubectl get po -A --selector app=buoyant-cloud-agent Ensure the `buoyant-cloud-metrics` DaemonSet exists: ```bash -$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics +kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics ``` If the DaemonSet does not exist, the `linkerd-buoyant` installation may be missing or incomplete. 
To reinstall the extension: ```bash -$ linkerd-buoyant install | kubectl apply -f - +linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-metrics DaemonSet is running diff --git a/linkerd.io/content/2.14/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.14/tasks/multicluster-using-statefulsets.md index bc0108fa23..bf17be188e 100644 --- a/linkerd.io/content/2.14/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.14/tasks/multicluster-using-statefulsets.md @@ -178,8 +178,8 @@ nginx-set-1 2/2 Running 0 4m58s nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s -$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh -/$ prompt for curl pod +$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- /bin/sh +/$ # prompt for curl pod ``` If we now curl one of these instances, we will get back a response. @@ -235,7 +235,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname will point to one of the services we see above: ```sh -$ kubectl --context=k3d-east get endpoints nginx-svc-k3d-west -o yaml +$ kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml subsets: - addresses: - hostname: nginx-set-0 @@ -256,7 +256,7 @@ NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -$ kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh +$ kubectl --context=k3d-east exec pod/curl-56dc7d945d-96r6p -it -c curl -- /bin/sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. 
diff --git a/linkerd.io/content/2.15/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.15/tasks/multicluster-using-statefulsets.md index bc0108fa23..bf17be188e 100644 --- a/linkerd.io/content/2.15/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.15/tasks/multicluster-using-statefulsets.md @@ -178,8 +178,8 @@ nginx-set-1 2/2 Running 0 4m58s nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s -$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh -/$ prompt for curl pod +$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh +/$ # prompt for curl pod ``` If we now curl one of these instances, we will get back a response. @@ -235,7 +235,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname will point to one of the services we see above: ```sh -$ kubectl --context=k3d-east get endpoints nginx-svc-k3d-west -o yaml +$ kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml subsets: - addresses: - hostname: nginx-set-0 @@ -256,7 +256,7 @@ NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -$ kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh +$ kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. 
diff --git a/linkerd.io/content/2.15/tasks/troubleshooting.md b/linkerd.io/content/2.15/tasks/troubleshooting.md index 9873774220..c5ede11b02 100644 --- a/linkerd.io/content/2.15/tasks/troubleshooting.md +++ b/linkerd.io/content/2.15/tasks/troubleshooting.md @@ -1241,7 +1241,7 @@ linkerd-cni 1 1 1 1 1 beta.kubernet Also ensure you have permission to create DaemonSets: ```bash -kubectl auth can-i create DaemonSets -n linkerd-cni +$ kubectl auth can-i create DaemonSets -n linkerd-cni yes ``` @@ -1408,7 +1408,7 @@ rules: Expected rules for `linkerd-service-mirror-read-remote-creds` role: ```bash -kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml +$ kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml kind: Role metadata: labels: @@ -1570,7 +1570,7 @@ linkerd-linkerd-viz-web-check 2021-01-2 Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create clusterroles +$ kubectl auth can-i create clusterroles yes ``` @@ -1916,7 +1916,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-jaeger pods are running with 2/2 ```bash -kubectl -n linkerd-jaeger get pods +$ kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE jaeger-injector-548684d74b-bcq5h 2/2 Running 0 5s collector-69cc44dfbc-wqf6s 2/2 Running 0 5s @@ -2038,7 +2038,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +$ kubectl auth can-i create ClusterRoles yes ``` diff --git a/linkerd.io/content/2.16/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.16/tasks/multicluster-using-statefulsets.md index 9a143a2bce..b64d8882da 100644 --- a/linkerd.io/content/2.16/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.16/tasks/multicluster-using-statefulsets.md @@ -178,8 +178,8 @@ nginx-set-1 2/2 Running 
0 4m58s nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s -$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh -/$ prompt for curl pod +$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh +/$ # prompt for curl pod ``` If we now curl one of these instances, we will get back a response. @@ -235,7 +235,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname will point to one of the services we see above: ```sh -$ kubectl --context=k3d-east get endpoints nginx-svc-k3d-west -o yaml +$ kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml subsets: - addresses: - hostname: nginx-set-0 @@ -256,7 +256,7 @@ NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -$ kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh +$ kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. diff --git a/linkerd.io/content/2.16/tasks/troubleshooting.md b/linkerd.io/content/2.16/tasks/troubleshooting.md index 6f37f2e24e..c5ede11b02 100644 --- a/linkerd.io/content/2.16/tasks/troubleshooting.md +++ b/linkerd.io/content/2.16/tasks/troubleshooting.md @@ -1084,21 +1084,7 @@ Example warning: This happens when one of the control plane pods doesn't have at least two replicas running. This is likely caused by insufficient node resources. -## Extensions {#extensions} - -### √ namespace configuration for extensions {#l5d-extension-namespaces} - -Linkerd's extension model requires that each namespace that "owns" an extension -to be labelled with the extension name. For example, the namespace viz is -installed in would be labelled with `linkerd.io/extension=viz`. 
This warning is -triggered if an extension value is used for the label key more than once across -the cluster. - -To resolve this warning, ensure that the `linkerd.io/extension` namespace label -does not have any dupliate values, indicating that an extension has been -installed more than once in different namespaces. - -### Extensions checks +### The "extensions" checks {#extensions} When any [Extensions](extensions/) are installed, The Linkerd binary tries to invoke `check --output json` on the extension binaries. It is important that the @@ -1108,7 +1094,7 @@ extension binaries implement it. For more information, See Example error: ```bash -invalid extension check output from \"viz\" (JSON object expected) +invalid extension check output from \"jaeger\" (JSON object expected) ``` Make sure that the extension binary implements `check --output json` which @@ -1118,7 +1104,7 @@ returns the healthchecks in the Example error: ```bash -× Linkerd command viz exists +× Linkerd command jaeger exists ``` Make sure that relevant binary exists in `$PATH`. @@ -1327,17 +1313,6 @@ Example error: Make sure all the link objects are specified in the expected format. -### √ Link and CLI versions match {#l5d-multicluster-links-version} - -This warning indicates that there are Link resources which do not match the -version of the CLI. This usually means that the CLI has been upgraded but that -the Link resources have not and certain features may not be supported on those -Links until they are upgraded. - -To upgrade a Link, regenerate it. Refer to the -[multicluster docs](multicluster/#linking-the-clusters) for instructions on how -to do this. 
- ### √ remote cluster access credentials are valid {#l5d-smc-target-clusters-access} Example error: @@ -1433,7 +1408,7 @@ rules: Expected rules for `linkerd-service-mirror-read-remote-creds` role: ```bash -kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml +$ kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml kind: Role metadata: labels: @@ -1471,34 +1446,6 @@ NAME READY STATUS RESTARTS AGE linkerd-service-mirror-7bb8ff5967-zg265 2/2 Running 0 50m ``` -### √ extension is managing controllers {#l5d-multicluster-managed-controllers} - -Example error: - -```bash -‼ extension is managing controllers - * using legacy service mirror controller for Link: target - see https://linkerd.io/2/checks/#l5d-multicluster-managed-controllers for hints -``` - -In Linkerd `2.18` we introduced a declarative, GitOps-compatible approach to -establishing multicluster links. With this method, the controllers are -integrated into the multicluster extension, allowing you to supply the Link CR -and kubeconfig secrets manifests directly, without necessarily depending on the -`linkerd multicluster link` command. This differs from earlier versions of -Linkerd (pre-`v2.18`), where (in addition to the Link CR and secrets) controller -manifests needed to be provided each time a new link was created, requiring the -use of the `linkerd multicluster link` command — a process that was less suited -to a GitOps workflow. - -This check ensures the linked clusters are using the new model. To migrate from -the old model, update the multicluster extension, referring your links into the -new `controllers` entry, as detailed in the -[installing multicluster doc](installing-multicluster/#step-1-install-the-multicluster-control-plane). -The new controllers will be deployed, but they won't manage the links until the -old ones get deleted. 
Once the old ones are removed, the new controllers will -grab the Lease object allowing them to take over service mirroring. - ### √ all gateway mirrors are healthy {#l5d-multicluster-gateways-endpoints} Example errors: @@ -1623,7 +1570,7 @@ linkerd-linkerd-viz-web-check 2021-01-2 Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create clusterroles +$ kubectl auth can-i create clusterroles yes ``` @@ -1897,6 +1844,88 @@ You should see all your pods here. If they are not: - Prometheus might be experiencing connectivity issues with the k8s api server. Check out the logs and delete the pod to flush any possible transient errors. +## The "linkerd-jaeger" checks {#l5d-jaeger} + +These checks only run when the `linkerd-jaeger` extension is installed. This +check is intended to verify the installation of linkerd-jaeger extension which +comprises of open-census collector and jaeger components along with +`jaeger-injector` which injects the specific trace configuration to the proxies. + +### √ linkerd-jaeger extension Namespace exists {#l5d-jaeger-ns-exists} + +This is the basic check used to verify if the linkerd-jaeger extension namespace +is installed or not. The extension can be installed by running the following +command + +```bash +linkerd jaeger install | kubectl apply -f - +``` + +The installation can be configured by using the `--set`, `--values`, +`--set-string` and `--set-file` flags. See +[Linkerd Jaeger Readme](https://www.github.com/linkerd/linkerd2/tree/main/jaeger/charts/linkerd-jaeger/README.md) +for a full list of configurable fields. + +### √ jaeger extension proxies are healthy {#l5d-jaeger-proxy-healthy} + +This error indicates that the proxies running in the jaeger extension are not +healthy. Ensure that linkerd-jaeger has been installed with all of the correct +setting or re-install as necessary. 
+ +### √ jaeger extension proxies are up-to-date {#l5d-jaeger-proxy-cp-version} + +This warning indicates the proxies running in the jaeger extension are running +an old version. We recommend downloading the latest linkerd-jaeger and +upgrading. + +### √ jaeger extension proxies and cli versions match {#l5d-jaeger-proxy-cli-version} + +This warning indicates that the proxies running in the jaeger extension are +running a different version from the Linkerd CLI. We recommend keeping this +versions in sync by updating either the CLI or linkerd-jaeger as necessary. + +### √ jaeger extension pods are injected {#l5d-jaeger-pods-injection} + +```bash +× jaeger extension pods are injected + could not find proxy container for jaeger-6f98d5c979-scqlq pod + see https://linkerd.io/2/checks/#l5d-jaeger-pods-injections for hints +``` + +Ensure all the jaeger pods are injected + +```bash +$ kubectl -n linkerd-jaeger get pods +NAME READY STATUS RESTARTS AGE +collector-69cc44dfbc-rhpfg 2/2 Running 0 11s +jaeger-6f98d5c979-scqlq 2/2 Running 0 11s +jaeger-injector-6c594f5577-cz75h 2/2 Running 0 10s +``` + +Make sure that the `proxy-injector` is working correctly by running +`linkerd check` + +### √ jaeger extension pods are running {#l5d-jaeger-pods-running} + +```bash +× jaeger extension pods are running + container linkerd-proxy in pod jaeger-59f5595fc7-ttndp is not ready + see https://linkerd.io/2/checks/#l5d-jaeger-pods-running for hints +``` + +Ensure all the linkerd-jaeger pods are running with 2/2 + +```bash +$ kubectl -n linkerd-jaeger get pods +NAME READY STATUS RESTARTS AGE +jaeger-injector-548684d74b-bcq5h 2/2 Running 0 5s +collector-69cc44dfbc-wqf6s 2/2 Running 0 5s +jaeger-6f98d5c979-vs622 2/2 Running 0 5sh +``` + +Make sure that the `proxy-injector` is working correctly by running +`linkerd check` + ## The "linkerd-buoyant" checks {#l5d-buoyant} These checks only run when the `linkerd-buoyant` extension is installed. 
This @@ -2009,7 +2038,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +$ kubectl auth can-i create ClusterRoles yes ``` @@ -2093,14 +2122,14 @@ yes Ensure the `buoyant-cloud-agent` Deployment exists: ```bash -$ kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent +kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent ``` If the Deployment does not exist, the `linkerd-buoyant` installation may be missing or incomplete. To reinstall the extension: ```bash -$ linkerd-buoyant install | kubectl apply -f - +linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running @@ -2166,7 +2195,7 @@ Agent version: v0.4.4 To update to the latest version: ```bash -$ linkerd-buoyant install | kubectl apply -f - +linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running a single pod @@ -2180,7 +2209,7 @@ $ linkerd-buoyant install | kubectl apply -f - `buoyant-cloud-agent` should run as a singleton. Check for other pods: ```bash -$ kubectl get po -A --selector app=buoyant-cloud-agent +kubectl get po -A --selector app=buoyant-cloud-agent ``` ### √ buoyant-cloud-metrics DaemonSet exists @@ -2194,14 +2223,14 @@ $ kubectl get po -A --selector app=buoyant-cloud-agent Ensure the `buoyant-cloud-metrics` DaemonSet exists: ```bash -$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics +kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics ``` If the DaemonSet does not exist, the `linkerd-buoyant` installation may be missing or incomplete. 
To reinstall the extension: ```bash -$ linkerd-buoyant install | kubectl apply -f - +linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-metrics DaemonSet is running diff --git a/linkerd.io/content/2.17/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.17/tasks/multicluster-using-statefulsets.md index bc0108fa23..bf17be188e 100644 --- a/linkerd.io/content/2.17/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.17/tasks/multicluster-using-statefulsets.md @@ -178,8 +178,8 @@ nginx-set-1 2/2 Running 0 4m58s nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s -$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh -/$ prompt for curl pod +$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh +/$ # prompt for curl pod ``` If we now curl one of these instances, we will get back a response. @@ -235,7 +235,7 @@ endpoints for `nginx-svc-west` will have the same hostnames, but each hostname will point to one of the services we see above: ```sh -$ kubectl --context=k3d-east get endpoints nginx-svc-k3d-west -o yaml +$ kubectl --context=k3d-east get endpoints nginx-svc-west -o yaml subsets: - addresses: - hostname: nginx-set-0 @@ -256,7 +256,7 @@ NAME READY STATUS RESTARTS AGE curl-56dc7d945d-96r6p 2/2 Running 0 23m # exec and curl -$ kubectl --context=k3d-east exec curl-56dc7d945d-96r6p -it -c curl -- sh +$ kubectl --context=k3d-east exec pod curl-56dc7d945d-96r6p -it -c curl -- bin/sh # we want to curl the same hostname we see in the endpoints object above. # however, the service and cluster domain will now be different, since we # are in a different cluster. 
diff --git a/linkerd.io/content/2.17/tasks/troubleshooting.md b/linkerd.io/content/2.17/tasks/troubleshooting.md index c4a4407e90..9148b28210 100644 --- a/linkerd.io/content/2.17/tasks/troubleshooting.md +++ b/linkerd.io/content/2.17/tasks/troubleshooting.md @@ -1433,7 +1433,7 @@ rules: Expected rules for `linkerd-service-mirror-read-remote-creds` role: ```bash -kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml +$ kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml kind: Role metadata: labels: @@ -1471,34 +1471,6 @@ NAME READY STATUS RESTARTS AGE linkerd-service-mirror-7bb8ff5967-zg265 2/2 Running 0 50m ``` -### √ extension is managing controllers {#l5d-multicluster-managed-controllers} - -Example error: - -```bash -‼ extension is managing controllers - * using legacy service mirror controller for Link: target - see https://linkerd.io/2/checks/#l5d-multicluster-managed-controllers for hints -``` - -In Linkerd `2.18` we introduced a declarative, GitOps-compatible approach to -establishing multicluster links. With this method, the controllers are -integrated into the multicluster extension, allowing you to supply the Link CR -and kubeconfig secrets manifests directly, without necessarily depending on the -`linkerd multicluster link` command. This differs from earlier versions of -Linkerd (pre-`v2.18`), where (in addition to the Link CR and secrets) controller -manifests needed to be provided each time a new link was created, requiring the -use of the `linkerd multicluster link` command — a process that was less suited -to a GitOps workflow. - -This check ensures the linked clusters are using the new model. To migrate from -the old model, update the multicluster extension, referring your links into the -new `controllers` entry, as detailed in the -[installing multicluster doc](installing-multicluster/#step-1-install-the-multicluster-control-plane). 
-The new controllers will be deployed, but they won't manage the links until the -old ones get deleted. Once the old ones are removed, the new controllers will -grab the Lease object allowing them to take over service mirroring. - ### √ all gateway mirrors are healthy {#l5d-multicluster-gateways-endpoints} Example errors: @@ -1623,7 +1595,7 @@ linkerd-linkerd-viz-web-check 2021-01-2 Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create clusterroles +$ kubectl auth can-i create clusterroles yes ``` @@ -1789,12 +1761,12 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the prometheus related resources are present and running correctly. ```bash -$ kubectl -n linkerd-viz get deploy,cm | grep prometheus +❯ kubectl -n linkerd-viz get deploy,cm | grep prometheus deployment.apps/prometheus 1/1 1 1 3m18s configmap/prometheus-config 1 3m18s -$ kubectl get clusterRoleBindings | grep prometheus +❯ kubectl get clusterRoleBindings | grep prometheus linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 3m37s -$ kubectl get clusterRoles | grep prometheus +❯ kubectl get clusterRoles | grep prometheus linkerd-linkerd-viz-prometheus 2021-02-26T06:03:11Zh ``` @@ -1969,7 +1941,7 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the linkerd-jaeger pods are running with 2/2 ```bash -kubectl -n linkerd-jaeger get pods +$ kubectl -n linkerd-jaeger get pods NAME READY STATUS RESTARTS AGE jaeger-injector-548684d74b-bcq5h 2/2 Running 0 5s collector-69cc44dfbc-wqf6s 2/2 Running 0 5s @@ -2091,7 +2063,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +$ kubectl auth can-i create ClusterRoles yes ``` @@ -2175,14 +2147,14 @@ yes Ensure the `buoyant-cloud-agent` Deployment exists: ```bash -$ kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent +kubectl -n buoyant-cloud get 
deploy/buoyant-cloud-agent ``` If the Deployment does not exist, the `linkerd-buoyant` installation may be missing or incomplete. To reinstall the extension: ```bash -$ linkerd-buoyant install | kubectl apply -f - +linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running @@ -2248,7 +2220,7 @@ Agent version: v0.4.4 To update to the latest version: ```bash -$ linkerd-buoyant install | kubectl apply -f - +linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running a single pod @@ -2262,7 +2234,7 @@ $ linkerd-buoyant install | kubectl apply -f - `buoyant-cloud-agent` should run as a singleton. Check for other pods: ```bash -$ kubectl get po -A --selector app=buoyant-cloud-agent +kubectl get po -A --selector app=buoyant-cloud-agent ``` ### √ buoyant-cloud-metrics DaemonSet exists @@ -2276,14 +2248,14 @@ $ kubectl get po -A --selector app=buoyant-cloud-agent Ensure the `buoyant-cloud-metrics` DaemonSet exists: ```bash -$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics +kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics ``` If the DaemonSet does not exist, the `linkerd-buoyant` installation may be missing or incomplete. To reinstall the extension: ```bash -$ linkerd-buoyant install | kubectl apply -f - +linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-metrics DaemonSet is running diff --git a/linkerd.io/content/2.18/tasks/troubleshooting.md b/linkerd.io/content/2.18/tasks/troubleshooting.md index ac4b53e57d..8cf602634c 100644 --- a/linkerd.io/content/2.18/tasks/troubleshooting.md +++ b/linkerd.io/content/2.18/tasks/troubleshooting.md @@ -1108,7 +1108,7 @@ extension binaries implement it. 
For more information, See Example error: ```bash -invalid extension check output from \"viz\" (JSON object expected) +invalid extension check output from \"jaeger\" (JSON object expected) ``` Make sure that the extension binary implements `check --output json` which @@ -1433,7 +1433,7 @@ rules: Expected rules for `linkerd-service-mirror-read-remote-creds` role: ```bash -kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml +$ kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml kind: Role metadata: labels: @@ -1623,7 +1623,7 @@ linkerd-linkerd-viz-web-check 2021-01-2 Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create clusterroles +$ kubectl auth can-i create clusterroles yes ``` @@ -1810,7 +1810,7 @@ Example failure: Verify that the metrics API pod is running correctly ```bash -$ kubectl -n linkerd-viz get pods +❯ kubectl -n linkerd-viz get pods NAME READY STATUS RESTARTS AGE metrics-api-7bb8cb8489-cbq4m 2/2 Running 0 4m58s tap-injector-6b9bc6fc4-cgbr4 2/2 Running 0 4m56s @@ -2175,14 +2175,14 @@ yes Ensure the `buoyant-cloud-agent` Deployment exists: ```bash -$ kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent +kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent ``` If the Deployment does not exist, the `linkerd-buoyant` installation may be missing or incomplete. To reinstall the extension: ```bash -$ linkerd-buoyant install | kubectl apply -f - +linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running @@ -2248,7 +2248,7 @@ Agent version: v0.4.4 To update to the latest version: ```bash -$ linkerd-buoyant install | kubectl apply -f - +linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running a single pod @@ -2262,7 +2262,7 @@ $ linkerd-buoyant install | kubectl apply -f - `buoyant-cloud-agent` should run as a singleton. 
Check for other pods: ```bash -$ kubectl get po -A --selector app=buoyant-cloud-agent +kubectl get po -A --selector app=buoyant-cloud-agent ``` ### √ buoyant-cloud-metrics DaemonSet exists @@ -2276,14 +2276,14 @@ $ kubectl get po -A --selector app=buoyant-cloud-agent Ensure the `buoyant-cloud-metrics` DaemonSet exists: ```bash -$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics +kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics ``` If the DaemonSet does not exist, the `linkerd-buoyant` installation may be missing or incomplete. To reinstall the extension: ```bash -$ linkerd-buoyant install | kubectl apply -f - +linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-metrics DaemonSet is running diff --git a/linkerd.io/content/2.19/tasks/troubleshooting.md b/linkerd.io/content/2.19/tasks/troubleshooting.md index 6f37f2e24e..6590ab620e 100644 --- a/linkerd.io/content/2.19/tasks/troubleshooting.md +++ b/linkerd.io/content/2.19/tasks/troubleshooting.md @@ -1433,7 +1433,7 @@ rules: Expected rules for `linkerd-service-mirror-read-remote-creds` role: ```bash -kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml +$ kubectl --context=local get role linkerd-service-mirror-read-remote-creds -n linkerd-multicluster -o yaml kind: Role metadata: labels: @@ -1623,7 +1623,7 @@ linkerd-linkerd-viz-web-check 2021-01-2 Also ensure you have permission to create ClusterRoles: ```bash -kubectl auth can-i create clusterroles +$ kubectl auth can-i create clusterroles yes ``` @@ -1789,12 +1789,12 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the prometheus related resources are present and running correctly. 
```bash -$ kubectl -n linkerd-viz get deploy,cm | grep prometheus +❯ kubectl -n linkerd-viz get deploy,cm | grep prometheus deployment.apps/prometheus 1/1 1 1 3m18s configmap/prometheus-config 1 3m18s -$ kubectl get clusterRoleBindings | grep prometheus +❯ kubectl get clusterRoleBindings | grep prometheus linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 3m37s -$ kubectl get clusterRoles | grep prometheus +❯ kubectl get clusterRoles | grep prometheus linkerd-linkerd-viz-prometheus 2021-02-26T06:03:11Zh ``` @@ -2009,7 +2009,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +$ kubectl auth can-i create ClusterRoles yes ``` @@ -2093,14 +2093,14 @@ yes Ensure the `buoyant-cloud-agent` Deployment exists: ```bash -$ kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent +kubectl -n buoyant-cloud get deploy/buoyant-cloud-agent ``` If the Deployment does not exist, the `linkerd-buoyant` installation may be missing or incomplete. To reinstall the extension: ```bash -$ linkerd-buoyant install | kubectl apply -f - +linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running @@ -2166,7 +2166,7 @@ Agent version: v0.4.4 To update to the latest version: ```bash -$ linkerd-buoyant install | kubectl apply -f - +linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running a single pod @@ -2180,7 +2180,7 @@ $ linkerd-buoyant install | kubectl apply -f - `buoyant-cloud-agent` should run as a singleton. 
Check for other pods: ```bash -$ kubectl get po -A --selector app=buoyant-cloud-agent +kubectl get po -A --selector app=buoyant-cloud-agent ``` ### √ buoyant-cloud-metrics DaemonSet exists @@ -2194,14 +2194,14 @@ $ kubectl get po -A --selector app=buoyant-cloud-agent Ensure the `buoyant-cloud-metrics` DaemonSet exists: ```bash -$ kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics +kubectl -n buoyant-cloud get daemonset/buoyant-cloud-metrics ``` If the DaemonSet does not exist, the `linkerd-buoyant` installation may be missing or incomplete. To reinstall the extension: ```bash -$ linkerd-buoyant install | kubectl apply -f - +linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-metrics DaemonSet is running From 5de2a735a9d07e8e19c51bc59c1fb135f7111da4 Mon Sep 17 00:00:00 2001 From: bezarsnba Date: Sat, 31 Jan 2026 18:23:46 -0300 Subject: [PATCH 17/31] fix comment lines Signed-off-by: bezarsnba --- .../content/2-edge/tasks/multicluster-using-statefulsets.md | 2 +- .../content/2.12/tasks/multicluster-using-statefulsets.md | 2 +- .../content/2.18/tasks/multicluster-using-statefulsets.md | 2 +- .../content/2.19/tasks/multicluster-using-statefulsets.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/linkerd.io/content/2-edge/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2-edge/tasks/multicluster-using-statefulsets.md index e486a81d78..f179369598 100644 --- a/linkerd.io/content/2-edge/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2-edge/tasks/multicluster-using-statefulsets.md @@ -178,7 +178,7 @@ nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s $ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh -/$ prompt for curl pod +/$ # prompt for curl pod ``` If we now curl one of these instances, we will get back a response. 
diff --git a/linkerd.io/content/2.12/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.12/tasks/multicluster-using-statefulsets.md index 544a1324cb..c86fa0f5ab 100644 --- a/linkerd.io/content/2.12/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.12/tasks/multicluster-using-statefulsets.md @@ -179,7 +179,7 @@ nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s $ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh -/$ #prompt for curl pod +/$ # prompt for curl pod ``` If we now curl one of these instances, we will get back a response. diff --git a/linkerd.io/content/2.18/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.18/tasks/multicluster-using-statefulsets.md index eace8623fa..2f5a04c073 100644 --- a/linkerd.io/content/2.18/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.18/tasks/multicluster-using-statefulsets.md @@ -178,7 +178,7 @@ nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s $ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh -/$ prompt for curl pod +/$ # prompt for curl pod ``` If we now curl one of these instances, we will get back a response. diff --git a/linkerd.io/content/2.19/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.19/tasks/multicluster-using-statefulsets.md index eace8623fa..2f5a04c073 100644 --- a/linkerd.io/content/2.19/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.19/tasks/multicluster-using-statefulsets.md @@ -178,7 +178,7 @@ nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s $ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- sh -/$ prompt for curl pod +/$ # prompt for curl pod ``` If we now curl one of these instances, we will get back a response. 
From 6d336d142548acc53b18ea422f52f7cb063222c0 Mon Sep 17 00:00:00 2001 From: bezarsnba Date: Sat, 31 Jan 2026 18:38:24 -0300 Subject: [PATCH 18/31] revision blog session Signed-off-by: bezarsnba --- .../index.md | 8 ++++---- .../index.md | 6 +++--- .../index.md | 14 +++++++------- .../2019/1007-linkerd-distributed-tracing/index.md | 2 +- .../blog/2024/1015-edge-release-roundup/index.md | 2 +- .../2025/0725-tilt-linkerd-nginx-part-2/index.md | 2 +- 6 files changed, 17 insertions(+), 17 deletions(-) diff --git a/linkerd.io/content/blog/2016/1210-slow-cooker-load-testing-for-tough-software/index.md b/linkerd.io/content/blog/2016/1210-slow-cooker-load-testing-for-tough-software/index.md index 59909fb59f..e75e31a998 100644 --- a/linkerd.io/content/blog/2016/1210-slow-cooker-load-testing-for-tough-software/index.md +++ b/linkerd.io/content/blog/2016/1210-slow-cooker-load-testing-for-tough-software/index.md @@ -91,7 +91,7 @@ static content. The latencies given are in milliseconds, and we report the min, p50, p95, p99, p999, and max latencies seen during this 10 second interval. ```txt -./slow_cooker_linux_amd64 -url http://target:4140 -qps 50 -concurrency 10 http://perf-target-2:8080 +$ ./slow_cooker_linux_amd64 -url http://target:4140 -qps 50 -concurrency 10 http://perf-target-2:8080 # sending 500 req/s with concurrency=10 to http://perf-target-2:8080 ... # good/b/f t good% min [p50 p95 p99 p999] max change 2016-10-12T20:34:20Z 4990/0/0 5000 99% 10s 0 [ 1 3 4 9 ] 9 @@ -120,7 +120,7 @@ latency. In the example below, we have a backend server suffering from a catastrophic slow down: ```txt -./slow_cooker_linux_amd64 -totalRequests 100000 -qps 5 -concurrency 100 http://perf-target-1:8080 +$ ./slow_cooker_linux_amd64 -totalRequests 100000 -qps 5 -concurrency 100 http://perf-target-1:8080 # sending 500 req/s with concurrency=10 to http://perf-target-2:8080 ... 
# good/b/f t good% min [p50 p95 p99 p999] max change 2016-11-14T20:58:13Z 4900/0/0 5000 98% 10s 0 [ 1 2 6 8 ] 8 + @@ -165,7 +165,7 @@ For comparison, let’s start with a [ApacheBench](http://httpd.apache.org/docs/2.4/programs/ab.html)’s report: ```txt -ab -n 100000 -c 10 http://perf-target-1:8080/ +$ ab -n 100000 -c 10 http://perf-target-1:8080/ This is ApacheBench, Version 2.3 Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/ Licensed to The Apache Software Foundation, http://www.apache.org/ @@ -232,7 +232,7 @@ becomes much more clear that the 99.9th percentile is consistently high; this is not just a few outliers, but a persistent and ongoing problem: ```txt -./slow_cooker_linux_amd64 -totalRequests 20000 -qps 50 -concurrency 10 http://perf-target-2:8080 +$ ./slow_cooker_linux_amd64 -totalRequests 20000 -qps 50 -concurrency 10 http://perf-target-2:8080 # sending 500 req/s with concurrency=10 to http://perf-target-2:8080 ... # good/b/f t good% min [p50 p95 p99 p999] max change 2016-12-07T19:05:37Z 2510/0/0 5000 50% 10s 0 [ 0 0 2 4995 ] 4994 + diff --git a/linkerd.io/content/blog/2018/1208-service-profiles-for-per-route-metrics/index.md b/linkerd.io/content/blog/2018/1208-service-profiles-for-per-route-metrics/index.md index 8936d6c39f..b54e698c64 100644 --- a/linkerd.io/content/blog/2018/1208-service-profiles-for-per-route-metrics/index.md +++ b/linkerd.io/content/blog/2018/1208-service-profiles-for-per-route-metrics/index.md @@ -150,7 +150,7 @@ service—but we can't, because we haven't defined any routes for that service yet! ```bash -linkerd routes svc/webapp +$ linkerd routes svc/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 [UNKNOWN] webapp 70.00% 5.7rps 34ms 100ms 269ms ``` @@ -188,13 +188,13 @@ spec: This service describes two routes that the webapp service responds to, `/books` and `/books/`. 
We add the service profile with `kubectl apply`: -`kubectl apply -f webapp-profile.yaml` +`$ kubectl apply -f webapp-profile.yaml` Within about a minute (Prometheus scrapes metrics from the proxies at regular intervals) per-route metrics will be available for the `webapp` service. ```bash -linkerd routes svc/webapp +$ linkerd routes svc/webapp ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 /books/{id} webapp 100.00% 0.3rps 26ms 75ms 95ms /books webapp 56.25% 0.5rps 25ms 320ms 384ms diff --git a/linkerd.io/content/blog/2019/0222-how-we-designed-retries-in-linkerd-2-2/index.md b/linkerd.io/content/blog/2019/0222-how-we-designed-retries-in-linkerd-2-2/index.md index 01373a9ecc..3e672ef738 100644 --- a/linkerd.io/content/blog/2019/0222-how-we-designed-retries-in-linkerd-2-2/index.md +++ b/linkerd.io/content/blog/2019/0222-how-we-designed-retries-in-linkerd-2-2/index.md @@ -164,7 +164,7 @@ One thing that we can notice about this application is that the success rate of requests from the books service to the authors service is very poor: ```bash -linkerd routes deploy/books --to svc/authors +$ linkerd routes deploy/books --to svc/authors ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 [DEFAULT] authors 54.24% 3.9rps 5ms 14ms 19ms ``` @@ -173,8 +173,8 @@ To get a better picture of what’s going on here, let’s add a service profile the authors service, generated from a Swagger definition: ```bash -curl --proto '=https' --tlsv1.2 -sSfL https://run.linkerd.io/booksapp/authors.swagger | linkerd profile --open-api - authors | kubectl apply -f - -linkerd routes deploy/books --to svc/authors +$ curl --proto '=https' --tlsv1.2 -sSfL https://run.linkerd.io/booksapp/authors.swagger | linkerd profile --open-api - authors | kubectl apply -f - +$ linkerd routes deploy/books --to svc/authors ROUTE SERVICE SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 DELETE /authors/{id}.json authors 0.00% 0.0rps 0ms 0ms 0ms GET /authors.json authors 0.00% 0.0rps 0ms 0ms 
0ms @@ -190,7 +190,7 @@ time. To correct this, let’s edit the authors service profile and make those requests retryable: ```bash -kubectl edit sp/authors.default.svc.cluster.local +$ kubectl edit sp/authors.default.svc.cluster.local [...] - condition: method: HEAD @@ -203,7 +203,7 @@ After editing the service profile, we see a nearly immediate improvement in success rate: ```bash -linkerd routes deploy/books --to svc/authors -o wide +$ linkerd routes deploy/books --to svc/authors -o wide ROUTE SERVICE EFFECTIVE_SUCCESS EFFECTIVE_RPS ACTUAL_SUCCESS ACTUAL_RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 DELETE /authors/{id}.json authors 0.00% 0.0rps 0.00% 0.0rps 0ms 0ms 0ms GET /authors.json authors 0.00% 0.0rps 0.00% 0.0rps 0ms 0ms 0ms @@ -221,7 +221,7 @@ the purposes of this demo, I’ll set a timeout of 25ms. Your results will vary depending on the characteristics of your system. ```bash -kubectl edit sp/authors.default.svc.cluster.local +$ kubectl edit sp/authors.default.svc.cluster.local [...] - condition: method: HEAD @@ -235,7 +235,7 @@ We now see that success rate has come down slightly because some requests are timing out, but that the tail latency has been greatly reduced: ```bash -linkerd routes deploy/books --to svc/authors -o wide +$ linkerd routes deploy/books --to svc/authors -o wide ROUTE SERVICE EFFECTIVE_SUCCESS EFFECTIVE_RPS ACTUAL_SUCCESS ACTUAL_RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 DELETE /authors/{id}.json authors 0.00% 0.0rps 0.00% 0.0rps 0ms 0ms 0ms GET /authors.json authors 0.00% 0.0rps 0.00% 0.0rps 0ms 0ms 0ms diff --git a/linkerd.io/content/blog/2019/1007-linkerd-distributed-tracing/index.md b/linkerd.io/content/blog/2019/1007-linkerd-distributed-tracing/index.md index 261bb1dbd5..790268900d 100644 --- a/linkerd.io/content/blog/2019/1007-linkerd-distributed-tracing/index.md +++ b/linkerd.io/content/blog/2019/1007-linkerd-distributed-tracing/index.md @@ -82,7 +82,7 @@ on your cluster. If you don't, you can follow the instructions. 
```bash -linkerd version +$ linkerd version Client version: stable-2.6 Server version: stable-2.6 ``` diff --git a/linkerd.io/content/blog/2024/1015-edge-release-roundup/index.md b/linkerd.io/content/blog/2024/1015-edge-release-roundup/index.md index 66c8b2094c..8f61008f3b 100644 --- a/linkerd.io/content/blog/2024/1015-edge-release-roundup/index.md +++ b/linkerd.io/content/blog/2024/1015-edge-release-roundup/index.md @@ -120,7 +120,7 @@ command line to the new metrics available based on Gateway API routes, for example: ```bash {class=disable-copy} -linkerd viz stat-outbound -n faces deploy/face +$ linkerd viz stat-outbound -n faces deploy/face NAME SERVICE ROUTE TYPE BACKEND SUCCESS RPS LATENCY_P50 LATENCY_P95 LATENCY_P99 TIMEOUTS RETRIES face smiley:80 smiley-route HTTPRoute 78.36% 6.32 41ms 5886ms 9177ms 0.00% 0.00% ├─────────────────────► smiley:80 79.34% 5.57 20ms 5725ms 9145ms 0.00% diff --git a/linkerd.io/content/blog/2025/0725-tilt-linkerd-nginx-part-2/index.md b/linkerd.io/content/blog/2025/0725-tilt-linkerd-nginx-part-2/index.md index 44f4e39053..dc79544aa8 100644 --- a/linkerd.io/content/blog/2025/0725-tilt-linkerd-nginx-part-2/index.md +++ b/linkerd.io/content/blog/2025/0725-tilt-linkerd-nginx-part-2/index.md @@ -199,7 +199,7 @@ While the dashboard provides intuitive visualizations, the Linkerd CLI offers the same data in a terminal-friendly format for quick diagnostics: ```bash -linkerd viz top deployment/baz +$ linkerd viz top deployment/baz Source Destination Method Path Count Best Worst Last Success Rate foo-64798767b7-x8xvf baz-659dbf6895-v7gdm POST /demo.Baz/GetInfo 1187 81µs 9ms 124µs 100.00% bar-577c4bf849-cpdxl baz-659dbf6895-9twg9 POST /demo.Baz/GetInfo 1103 86µs 6ms 140µs 100.00% From c034ec2722ecba3f5aab88f285353138df222cbe Mon Sep 17 00:00:00 2001 From: bezarsnba Date: Sat, 31 Jan 2026 19:22:41 -0300 Subject: [PATCH 19/31] revert somes files and revision others Signed-off-by: bezarsnba --- .../2-edge/tasks/managing-egress-traffic.md | 2 +- 
.../2-edge/tasks/restricting-access.md | 4 ++-- .../content/2.11/tasks/restricting-access.md | 8 +++---- .../2.12/tasks/getting-per-route-metrics.md | 2 +- .../tasks/multicluster-using-statefulsets.md | 2 +- .../content/2.12/tasks/restricting-access.md | 4 ++-- linkerd.io/content/2.12/tasks/upgrade.md | 24 +++++++++---------- .../configuring-dynamic-request-routing.md | 2 +- .../tasks/multicluster-using-statefulsets.md | 2 +- .../2.13/tasks/uninstall-multicluster.md | 4 ++-- linkerd.io/content/2.13/tasks/upgrade.md | 18 +++++++------- .../2.14/tasks/getting-per-route-metrics.md | 2 +- .../content/2.14/tasks/troubleshooting.md | 16 ++++++------- linkerd.io/content/2.14/tasks/upgrade.md | 18 +++++++------- linkerd.io/content/2.15/tasks/multicluster.md | 6 ++--- 15 files changed, 57 insertions(+), 57 deletions(-) diff --git a/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md b/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md index 57142086b8..dbd9435a54 100644 --- a/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md @@ -70,7 +70,7 @@ Now SSH into the client container and start generating some external traffic: ```bash kubectl -n egress-test exec -it client -c client -- sh -while sleep 1; do curl -s http://httpbin.org/get ; done +$ while sleep 1; do curl -s http://httpbin.org/get ; done ``` In a separate shell, you can use the Linkerd diagnostics command to visualize diff --git a/linkerd.io/content/2-edge/tasks/restricting-access.md b/linkerd.io/content/2-edge/tasks/restricting-access.md index ee1a438573..a5787cf354 100644 --- a/linkerd.io/content/2-edge/tasks/restricting-access.md +++ b/linkerd.io/content/2-edge/tasks/restricting-access.md @@ -35,7 +35,7 @@ Linkerd custom resource which describes a specific port of a workload. Once the access it (we'll see how to authorize clients in a moment). 
```bash -$ kubectl apply -f - < Date: Sat, 31 Jan 2026 19:26:55 -0300 Subject: [PATCH 20/31] revision others files Signed-off-by: bezarsnba --- linkerd.io/content/2.15/tasks/restricting-access.md | 4 ++-- linkerd.io/content/2.15/tasks/uninstall.md | 6 +++--- linkerd.io/content/2.16/tasks/restricting-access.md | 2 +- linkerd.io/content/2.17/tasks/managing-egress-traffic.md | 2 +- linkerd.io/content/2.18/tasks/managing-egress-traffic.md | 4 ++-- linkerd.io/content/2.18/tasks/multicluster.md | 6 +++--- linkerd.io/content/2.18/tasks/troubleshooting.md | 2 +- linkerd.io/content/2.19/tasks/managing-egress-traffic.md | 2 +- linkerd.io/content/2.19/tasks/multicluster.md | 4 ++-- linkerd.io/content/2.19/tasks/restricting-access.md | 4 ++-- 10 files changed, 18 insertions(+), 18 deletions(-) diff --git a/linkerd.io/content/2.15/tasks/restricting-access.md b/linkerd.io/content/2.15/tasks/restricting-access.md index 08e06534f1..af25ce411e 100644 --- a/linkerd.io/content/2.15/tasks/restricting-access.md +++ b/linkerd.io/content/2.15/tasks/restricting-access.md @@ -84,7 +84,7 @@ to the Voting `Server` we created above. Note that meshed mTLS uses based on `ServiceAccounts`. ```bash -$ kubectl apply -f - <}} -Check out the service that was just created by the service mirror controller! +Check out the service that was just created by the controller! 
```bash kubectl --context=west -n test get svc podinfo-east @@ -506,9 +506,9 @@ To cleanup the multicluster control plane, you can run: ```bash # Delete the link CR -kubectl --context=west -n linkerd-multicluster delete links east +$ kubectl --context=west -n linkerd-multicluster delete links east # Delete the test namespace and uninstall multicluster -for ctx in west east; do \ +$ for ctx in west east; do \ kubectl --context=${ctx} delete ns test; \ linkerd --context=${ctx} multicluster uninstall | kubectl --context=${ctx} delete -f - ; \ done diff --git a/linkerd.io/content/2.18/tasks/troubleshooting.md b/linkerd.io/content/2.18/tasks/troubleshooting.md index 8cf602634c..3d2b7f9e33 100644 --- a/linkerd.io/content/2.18/tasks/troubleshooting.md +++ b/linkerd.io/content/2.18/tasks/troubleshooting.md @@ -2091,7 +2091,7 @@ buoyant-cloud-agent 2020-11-13T00:59:50Z Also ensure you have permission to create ClusterRoles: ```bash -$ kubectl auth can-i create clusterroles +$ kubectl auth can-i create ClusterRoles yes ``` diff --git a/linkerd.io/content/2.19/tasks/managing-egress-traffic.md b/linkerd.io/content/2.19/tasks/managing-egress-traffic.md index f942410625..dc6b5280f0 100644 --- a/linkerd.io/content/2.19/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2.19/tasks/managing-egress-traffic.md @@ -70,7 +70,7 @@ Now SSH into the client container and start generating some external traffic: ```bash kubectl -n egress-test exec -it client -c client -- sh -while sleep 1; do curl -s http://httpbin.org/get ; done +$ while sleep 1; do curl -s https://httpbin.org/get ; done ``` In a separate shell, you can use the Linkerd diagnostics command to visualize diff --git a/linkerd.io/content/2.19/tasks/multicluster.md b/linkerd.io/content/2.19/tasks/multicluster.md index 2779b7616a..3a80b3f3ed 100644 --- a/linkerd.io/content/2.19/tasks/multicluster.md +++ b/linkerd.io/content/2.19/tasks/multicluster.md @@ -506,9 +506,9 @@ To cleanup the multicluster control plane, you can 
run: ```bash # Delete the link CR -kubectl --context=west -n linkerd-multicluster delete links east +$ kubectl --context=west -n linkerd-multicluster delete links east # Delete the test namespace and uninstall multicluster -for ctx in west east; do \ +$ for ctx in west east; do \ kubectl --context=${ctx} delete ns test; \ linkerd --context=${ctx} multicluster uninstall | kubectl --context=${ctx} delete -f - ; \ done diff --git a/linkerd.io/content/2.19/tasks/restricting-access.md b/linkerd.io/content/2.19/tasks/restricting-access.md index ee1a438573..a5787cf354 100644 --- a/linkerd.io/content/2.19/tasks/restricting-access.md +++ b/linkerd.io/content/2.19/tasks/restricting-access.md @@ -35,7 +35,7 @@ Linkerd custom resource which describes a specific port of a workload. Once the access it (we'll see how to authorize clients in a moment). ```bash -$ kubectl apply -f - < Date: Sat, 31 Jan 2026 19:36:23 -0300 Subject: [PATCH 21/31] others revisions and reverts Signed-off-by: bezarsnba --- linkerd.io/content/2.12/tasks/upgrade.md | 12 +++++----- linkerd.io/content/2.15/tasks/multicluster.md | 2 +- linkerd.io/content/2.15/tasks/upgrade.md | 24 +++++++++---------- linkerd.io/content/2.16/tasks/upgrade.md | 24 +++++++++---------- linkerd.io/content/2.17/tasks/upgrade.md | 24 +++++++++---------- 5 files changed, 43 insertions(+), 43 deletions(-) diff --git a/linkerd.io/content/2.12/tasks/upgrade.md b/linkerd.io/content/2.12/tasks/upgrade.md index 001388cc6b..2909fe2cac 100644 --- a/linkerd.io/content/2.12/tasks/upgrade.md +++ b/linkerd.io/content/2.12/tasks/upgrade.md @@ -393,14 +393,14 @@ For example, for the viz extension: ```bash # update the helm repo -helm repo update +$ helm repo up # delete your current instance # (assuming you didn't use the -n flag when installing) -helm delete linkerd-viz +$ helm delete linkerd-viz # install the new chart version -helm install linkerd-viz -n linkerd-viz --create-namespace linkerd/linkerd-viz +$ helm install linkerd-viz -n 
linkerd-viz --create-namespace linkerd/linkerd-viz ``` ##### Upgrading the multicluster extension with Helm @@ -555,11 +555,11 @@ chart or installing the Linkerd-Viz chart. See below for a complete list of values which have moved. ```bash -helm repo update +$ helm repo up # Upgrade the control plane (this will remove viz components). -helm upgrade linkerd2 linkerd/linkerd2 --reset-values -f values.yaml --atomic +$ helm upgrade linkerd2 linkerd/linkerd2 --reset-values -f values.yaml --atomic # Install the Linkerd-Viz extension to restore viz functionality. -helm install linkerd2-viz linkerd/linkerd2-viz -f viz-values.yaml +$ helm install linkerd2-viz linkerd/linkerd2-viz -f viz-values.yaml ``` The following values were removed from the Linkerd2 chart. Most of the removed diff --git a/linkerd.io/content/2.15/tasks/multicluster.md b/linkerd.io/content/2.15/tasks/multicluster.md index c84598eb41..f1af09cd21 100644 --- a/linkerd.io/content/2.15/tasks/multicluster.md +++ b/linkerd.io/content/2.15/tasks/multicluster.md @@ -175,7 +175,7 @@ running: ```bash for ctx in west east; do echo "Checking gateway on cluster: ${ctx} ........." 
- + kubectl --context=${ctx} -n linkerd-multicluster \ rollout status deploy/linkerd-gateway || break echo "-------------" done diff --git a/linkerd.io/content/2.15/tasks/upgrade.md b/linkerd.io/content/2.15/tasks/upgrade.md index a73f4d54fc..54ffdee4f4 100644 --- a/linkerd.io/content/2.15/tasks/upgrade.md +++ b/linkerd.io/content/2.15/tasks/upgrade.md @@ -379,7 +379,7 @@ Find the release name you used for the `linkerd2` chart, and the namespace where this release stored its config: ```bash -helm ls -A +$ helm ls -A NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION linkerd default 1 2021-11-22 17:14:50.751436374 -0500 -05 deployed linkerd2-2.11.1 stable-2.11.1 ``` @@ -412,18 +412,18 @@ the `linkerd-crds`, `linkerd-control-plane` and `linkerd-smi` charts: ```bash # First migrate the CRDs -helm -n default get manifest linkerd | \ +$ helm -n default get manifest linkerd | \ yq 'select(.kind == "CustomResourceDefinition") | .metadata.name' | \ grep -v '\-\-\-' | \ xargs -n1 sh -c \ 'kubectl annotate --overwrite crd/$0 meta.helm.sh/release-name=linkerd-crds meta.helm.sh/release-namespace=linkerd' # Special case for TrafficSplit (only use if you have TrafficSplit CRs) -kubectl annotate --overwrite crd/trafficsplits.split.smi-spec.io \ +$ kubectl annotate --overwrite crd/trafficsplits.split.smi-spec.io \ meta.helm.sh/release-name=linkerd-smi meta.helm.sh/release-namespace=linkerd-smi # Now migrate all the other resources -helm -n default get manifest linkerd | \ +$ helm -n default get manifest linkerd | \ yq 'select(.kind != "CustomResourceDefinition")' | \ yq '.kind, .metadata.name, .metadata.namespace' | \ grep -v '\-\-\-' | @@ -437,14 +437,14 @@ above. 
```bash # First make sure you update the helm repo -helm repo up +$ helm repo up # Install the linkerd-crds chart -helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds +$ helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds # Install the linkerd-control-plane chart # (remember to add any customizations you retrieved above) -helm install linkerd-control-plane \ +$ helm install linkerd-control-plane \ -n linkerd \ --set-file identityTrustAnchorsPEM=ca.crt \ --set-file identity.issuer.tls.crtPEM=issuer.crt \ @@ -452,8 +452,8 @@ helm install linkerd-control-plane \ linkerd/linkerd-control-plane # Optional: if using TrafficSplit CRs -helm repo add l5d-smi https://linkerd.github.io/linkerd-smi -helm install linkerd-smi -n linkerd-smi --create-namespace l5d-smi/linkerd-smi +$ helm repo add l5d-smi https://linkerd.github.io/linkerd-smi +$ helm install linkerd-smi -n linkerd-smi --create-namespace l5d-smi/linkerd-smi ``` ##### Cleaning up the old linkerd2 Helm release @@ -482,14 +482,14 @@ For example, for the viz extension: ```bash # update the helm repo -helm repo up +$ helm repo up # delete your current instance # (assuming you didn't use the -n flag when installing) -helm delete linkerd-viz +$ helm delete linkerd-viz # install the new chart version -helm install linkerd-viz -n linkerd-viz --create-namespace linkerd/linkerd-viz +$ helm install linkerd-viz -n linkerd-viz --create-namespace linkerd/linkerd-viz ``` ##### Upgrading the multicluster extension with Helm diff --git a/linkerd.io/content/2.16/tasks/upgrade.md b/linkerd.io/content/2.16/tasks/upgrade.md index a73f4d54fc..54ffdee4f4 100644 --- a/linkerd.io/content/2.16/tasks/upgrade.md +++ b/linkerd.io/content/2.16/tasks/upgrade.md @@ -379,7 +379,7 @@ Find the release name you used for the `linkerd2` chart, and the namespace where this release stored its config: ```bash -helm ls -A +$ helm ls -A NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION linkerd default 1 
2021-11-22 17:14:50.751436374 -0500 -05 deployed linkerd2-2.11.1 stable-2.11.1 ``` @@ -412,18 +412,18 @@ the `linkerd-crds`, `linkerd-control-plane` and `linkerd-smi` charts: ```bash # First migrate the CRDs -helm -n default get manifest linkerd | \ +$ helm -n default get manifest linkerd | \ yq 'select(.kind == "CustomResourceDefinition") | .metadata.name' | \ grep -v '\-\-\-' | \ xargs -n1 sh -c \ 'kubectl annotate --overwrite crd/$0 meta.helm.sh/release-name=linkerd-crds meta.helm.sh/release-namespace=linkerd' # Special case for TrafficSplit (only use if you have TrafficSplit CRs) -kubectl annotate --overwrite crd/trafficsplits.split.smi-spec.io \ +$ kubectl annotate --overwrite crd/trafficsplits.split.smi-spec.io \ meta.helm.sh/release-name=linkerd-smi meta.helm.sh/release-namespace=linkerd-smi # Now migrate all the other resources -helm -n default get manifest linkerd | \ +$ helm -n default get manifest linkerd | \ yq 'select(.kind != "CustomResourceDefinition")' | \ yq '.kind, .metadata.name, .metadata.namespace' | \ grep -v '\-\-\-' | @@ -437,14 +437,14 @@ above. 
```bash # First make sure you update the helm repo -helm repo up +$ helm repo up # Install the linkerd-crds chart -helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds +$ helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds # Install the linkerd-control-plane chart # (remember to add any customizations you retrieved above) -helm install linkerd-control-plane \ +$ helm install linkerd-control-plane \ -n linkerd \ --set-file identityTrustAnchorsPEM=ca.crt \ --set-file identity.issuer.tls.crtPEM=issuer.crt \ @@ -452,8 +452,8 @@ helm install linkerd-control-plane \ linkerd/linkerd-control-plane # Optional: if using TrafficSplit CRs -helm repo add l5d-smi https://linkerd.github.io/linkerd-smi -helm install linkerd-smi -n linkerd-smi --create-namespace l5d-smi/linkerd-smi +$ helm repo add l5d-smi https://linkerd.github.io/linkerd-smi +$ helm install linkerd-smi -n linkerd-smi --create-namespace l5d-smi/linkerd-smi ``` ##### Cleaning up the old linkerd2 Helm release @@ -482,14 +482,14 @@ For example, for the viz extension: ```bash # update the helm repo -helm repo up +$ helm repo up # delete your current instance # (assuming you didn't use the -n flag when installing) -helm delete linkerd-viz +$ helm delete linkerd-viz # install the new chart version -helm install linkerd-viz -n linkerd-viz --create-namespace linkerd/linkerd-viz +$ helm install linkerd-viz -n linkerd-viz --create-namespace linkerd/linkerd-viz ``` ##### Upgrading the multicluster extension with Helm diff --git a/linkerd.io/content/2.17/tasks/upgrade.md b/linkerd.io/content/2.17/tasks/upgrade.md index a73f4d54fc..54ffdee4f4 100644 --- a/linkerd.io/content/2.17/tasks/upgrade.md +++ b/linkerd.io/content/2.17/tasks/upgrade.md @@ -379,7 +379,7 @@ Find the release name you used for the `linkerd2` chart, and the namespace where this release stored its config: ```bash -helm ls -A +$ helm ls -A NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION linkerd default 1 
2021-11-22 17:14:50.751436374 -0500 -05 deployed linkerd2-2.11.1 stable-2.11.1 ``` @@ -412,18 +412,18 @@ the `linkerd-crds`, `linkerd-control-plane` and `linkerd-smi` charts: ```bash # First migrate the CRDs -helm -n default get manifest linkerd | \ +$ helm -n default get manifest linkerd | \ yq 'select(.kind == "CustomResourceDefinition") | .metadata.name' | \ grep -v '\-\-\-' | \ xargs -n1 sh -c \ 'kubectl annotate --overwrite crd/$0 meta.helm.sh/release-name=linkerd-crds meta.helm.sh/release-namespace=linkerd' # Special case for TrafficSplit (only use if you have TrafficSplit CRs) -kubectl annotate --overwrite crd/trafficsplits.split.smi-spec.io \ +$ kubectl annotate --overwrite crd/trafficsplits.split.smi-spec.io \ meta.helm.sh/release-name=linkerd-smi meta.helm.sh/release-namespace=linkerd-smi # Now migrate all the other resources -helm -n default get manifest linkerd | \ +$ helm -n default get manifest linkerd | \ yq 'select(.kind != "CustomResourceDefinition")' | \ yq '.kind, .metadata.name, .metadata.namespace' | \ grep -v '\-\-\-' | @@ -437,14 +437,14 @@ above. 
```bash # First make sure you update the helm repo -helm repo up +$ helm repo up # Install the linkerd-crds chart -helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds +$ helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds # Install the linkerd-control-plane chart # (remember to add any customizations you retrieved above) -helm install linkerd-control-plane \ +$ helm install linkerd-control-plane \ -n linkerd \ --set-file identityTrustAnchorsPEM=ca.crt \ --set-file identity.issuer.tls.crtPEM=issuer.crt \ @@ -452,8 +452,8 @@ helm install linkerd-control-plane \ linkerd/linkerd-control-plane # Optional: if using TrafficSplit CRs -helm repo add l5d-smi https://linkerd.github.io/linkerd-smi -helm install linkerd-smi -n linkerd-smi --create-namespace l5d-smi/linkerd-smi +$ helm repo add l5d-smi https://linkerd.github.io/linkerd-smi +$ helm install linkerd-smi -n linkerd-smi --create-namespace l5d-smi/linkerd-smi ``` ##### Cleaning up the old linkerd2 Helm release @@ -482,14 +482,14 @@ For example, for the viz extension: ```bash # update the helm repo -helm repo up +$ helm repo up # delete your current instance # (assuming you didn't use the -n flag when installing) -helm delete linkerd-viz +$ helm delete linkerd-viz # install the new chart version -helm install linkerd-viz -n linkerd-viz --create-namespace linkerd/linkerd-viz +$ helm install linkerd-viz -n linkerd-viz --create-namespace linkerd/linkerd-viz ``` ##### Upgrading the multicluster extension with Helm From 90f7e6109e12e24f45f2a15b0ba06a25e9e16a5b Mon Sep 17 00:00:00 2001 From: bezarsnba Date: Sat, 31 Jan 2026 19:45:47 -0300 Subject: [PATCH 22/31] fix somes files Signed-off-by: bezarsnba --- .../content/2.11/tasks/restricting-access.md | 4 ++-- linkerd.io/content/2.12/tasks/upgrade.md | 18 +++++++++--------- linkerd.io/content/2.15/tasks/upgrade.md | 6 +++--- linkerd.io/content/2.16/tasks/upgrade.md | 6 +++--- .../content/2.17/tasks/troubleshooting.md | 6 +++--- 
linkerd.io/content/2.17/tasks/upgrade.md | 6 +++--- 6 files changed, 23 insertions(+), 23 deletions(-) diff --git a/linkerd.io/content/2.11/tasks/restricting-access.md b/linkerd.io/content/2.11/tasks/restricting-access.md index 78ca9b2581..cb4db5c857 100644 --- a/linkerd.io/content/2.11/tasks/restricting-access.md +++ b/linkerd.io/content/2.11/tasks/restricting-access.md @@ -16,9 +16,9 @@ Ensure that you have Linkerd version stable-2.11.0 or later installed, and that it is healthy: ```bash -linkerd install | kubectl apply -f - +$ linkerd install | kubectl apply -f - ... -linkerd check -o short +$ linkerd check -o short ... ``` diff --git a/linkerd.io/content/2.12/tasks/upgrade.md b/linkerd.io/content/2.12/tasks/upgrade.md index 2909fe2cac..7edc1d56a9 100644 --- a/linkerd.io/content/2.12/tasks/upgrade.md +++ b/linkerd.io/content/2.12/tasks/upgrade.md @@ -259,8 +259,8 @@ kubectl label crd/trafficsplits.split.smi-spec.io \ Now you can install the SMI extension. E.g. via Helm: ```bash -$ helm repo add l5d-smi https://linkerd.github.io/linkerd-smi -$ helm install linkerd-smi -n linkerd-smi --create-namespace l5d-smi/linkerd-smi +helm repo add l5d-smi https://linkerd.github.io/linkerd-smi +helm install linkerd-smi -n linkerd-smi --create-namespace l5d-smi/linkerd-smi ``` And finally you can proceed with the usual @@ -348,7 +348,7 @@ above. 
```bash # First make sure you update the helm repo -$ helm repo update +$ helm repo up # Install the linkerd-crds chart $ helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds @@ -393,14 +393,14 @@ For example, for the viz extension: ```bash # update the helm repo -$ helm repo up +helm repo up # delete your current instance # (assuming you didn't use the -n flag when installing) -$ helm delete linkerd-viz +helm delete linkerd-viz # install the new chart version -$ helm install linkerd-viz -n linkerd-viz --create-namespace linkerd/linkerd-viz +helm install linkerd-viz -n linkerd-viz --create-namespace linkerd/linkerd-viz ``` ##### Upgrading the multicluster extension with Helm @@ -555,11 +555,11 @@ chart or installing the Linkerd-Viz chart. See below for a complete list of values which have moved. ```bash -$ helm repo up +helm repo up # Upgrade the control plane (this will remove viz components). -$ helm upgrade linkerd2 linkerd/linkerd2 --reset-values -f values.yaml --atomic +helm upgrade linkerd2 linkerd/linkerd2 --reset-values -f values.yaml --atomic # Install the Linkerd-Viz extension to restore viz functionality. -$ helm install linkerd2-viz linkerd/linkerd2-viz -f viz-values.yaml +helm install linkerd2-viz linkerd/linkerd2-viz -f viz-values.yaml ``` The following values were removed from the Linkerd2 chart. Most of the removed diff --git a/linkerd.io/content/2.15/tasks/upgrade.md b/linkerd.io/content/2.15/tasks/upgrade.md index 54ffdee4f4..a380c0a56c 100644 --- a/linkerd.io/content/2.15/tasks/upgrade.md +++ b/linkerd.io/content/2.15/tasks/upgrade.md @@ -437,14 +437,14 @@ above. 
```bash # First make sure you update the helm repo -$ helm repo up +helm repo up # Install the linkerd-crds chart -$ helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds +helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds # Install the linkerd-control-plane chart # (remember to add any customizations you retrieved above) -$ helm install linkerd-control-plane \ +helm install linkerd-control-plane \ -n linkerd \ --set-file identityTrustAnchorsPEM=ca.crt \ --set-file identity.issuer.tls.crtPEM=issuer.crt \ diff --git a/linkerd.io/content/2.16/tasks/upgrade.md b/linkerd.io/content/2.16/tasks/upgrade.md index 54ffdee4f4..a380c0a56c 100644 --- a/linkerd.io/content/2.16/tasks/upgrade.md +++ b/linkerd.io/content/2.16/tasks/upgrade.md @@ -437,14 +437,14 @@ above. ```bash # First make sure you update the helm repo -$ helm repo up +helm repo up # Install the linkerd-crds chart -$ helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds +helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds # Install the linkerd-control-plane chart # (remember to add any customizations you retrieved above) -$ helm install linkerd-control-plane \ +helm install linkerd-control-plane \ -n linkerd \ --set-file identityTrustAnchorsPEM=ca.crt \ --set-file identity.issuer.tls.crtPEM=issuer.crt \ diff --git a/linkerd.io/content/2.17/tasks/troubleshooting.md b/linkerd.io/content/2.17/tasks/troubleshooting.md index 9148b28210..e3e6728667 100644 --- a/linkerd.io/content/2.17/tasks/troubleshooting.md +++ b/linkerd.io/content/2.17/tasks/troubleshooting.md @@ -1761,12 +1761,12 @@ Make sure that the `proxy-injector` is working correctly by running Ensure all the prometheus related resources are present and running correctly. 
```bash -❯ kubectl -n linkerd-viz get deploy,cm | grep prometheus +$ kubectl -n linkerd-viz get deploy,cm | grep prometheus deployment.apps/prometheus 1/1 1 1 3m18s configmap/prometheus-config 1 3m18s -❯ kubectl get clusterRoleBindings | grep prometheus +$ kubectl get clusterRoleBindings | grep prometheus linkerd-linkerd-viz-prometheus ClusterRole/linkerd-linkerd-viz-prometheus 3m37s -❯ kubectl get clusterRoles | grep prometheus +$ kubectl get clusterRoles | grep prometheus linkerd-linkerd-viz-prometheus 2021-02-26T06:03:11Zh ``` diff --git a/linkerd.io/content/2.17/tasks/upgrade.md b/linkerd.io/content/2.17/tasks/upgrade.md index 54ffdee4f4..a380c0a56c 100644 --- a/linkerd.io/content/2.17/tasks/upgrade.md +++ b/linkerd.io/content/2.17/tasks/upgrade.md @@ -437,14 +437,14 @@ above. ```bash # First make sure you update the helm repo -$ helm repo up +helm repo up # Install the linkerd-crds chart -$ helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds +helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds # Install the linkerd-control-plane chart # (remember to add any customizations you retrieved above) -$ helm install linkerd-control-plane \ +helm install linkerd-control-plane \ -n linkerd \ --set-file identityTrustAnchorsPEM=ca.crt \ --set-file identity.issuer.tls.crtPEM=issuer.crt \ From bb76891c5a295e86d7fc2457d7eaf9ad2ec8473e Mon Sep 17 00:00:00 2001 From: bezarsnba Date: Sat, 31 Jan 2026 20:20:25 -0300 Subject: [PATCH 23/31] typo Signed-off-by: bezarsnba --- linkerd.io/content/2-edge/reference/iptables.md | 2 +- .../content/2-edge/tasks/multicluster-using-statefulsets.md | 2 +- linkerd.io/content/2.11/reference/iptables.md | 2 +- linkerd.io/content/2.12/reference/iptables.md | 2 +- linkerd.io/content/2.13/reference/iptables.md | 2 +- linkerd.io/content/2.14/reference/iptables.md | 2 +- linkerd.io/content/2.15/reference/iptables.md | 2 +- linkerd.io/content/2.16/reference/iptables.md | 2 +- 
linkerd.io/content/2.17/reference/iptables.md | 2 +- linkerd.io/content/2.18/reference/iptables.md | 2 +- linkerd.io/content/2.19/reference/iptables.md | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/linkerd.io/content/2-edge/reference/iptables.md b/linkerd.io/content/2-edge/reference/iptables.md index 44f16fb25a..c207bd01d4 100644 --- a/linkerd.io/content/2-edge/reference/iptables.md +++ b/linkerd.io/content/2-edge/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -kubectl -n logs linkerd-init +$ kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2-edge/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2-edge/tasks/multicluster-using-statefulsets.md index f179369598..2aafd97635 100644 --- a/linkerd.io/content/2-edge/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2-edge/tasks/multicluster-using-statefulsets.md @@ -220,7 +220,7 @@ export the service. 
$ kubectl --context=k3d-west label service nginx-svc mirror.linkerd.io/exported="true" service/nginx-svc labeled -kubectl --context=k3d-east get services +$ kubectl --context=k3d-east get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.43.0.1 443/TCP 20h nginx-svc-west ClusterIP None 80/TCP 29s diff --git a/linkerd.io/content/2.11/reference/iptables.md b/linkerd.io/content/2.11/reference/iptables.md index 9b4d229a59..c207bd01d4 100644 --- a/linkerd.io/content/2.11/reference/iptables.md +++ b/linkerd.io/content/2.11/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -kubectl -n logs linkerd-init +$ kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2.12/reference/iptables.md b/linkerd.io/content/2.12/reference/iptables.md index 9b4d229a59..c207bd01d4 100644 --- a/linkerd.io/content/2.12/reference/iptables.md +++ b/linkerd.io/content/2.12/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -kubectl -n logs linkerd-init +$ kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2.13/reference/iptables.md b/linkerd.io/content/2.13/reference/iptables.md index 9b4d229a59..c207bd01d4 100644 --- a/linkerd.io/content/2.13/reference/iptables.md +++ b/linkerd.io/content/2.13/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -kubectl -n logs linkerd-init +$ kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git 
a/linkerd.io/content/2.14/reference/iptables.md b/linkerd.io/content/2.14/reference/iptables.md index 9b4d229a59..c207bd01d4 100644 --- a/linkerd.io/content/2.14/reference/iptables.md +++ b/linkerd.io/content/2.14/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -kubectl -n logs linkerd-init +$ kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2.15/reference/iptables.md b/linkerd.io/content/2.15/reference/iptables.md index 9b4d229a59..c207bd01d4 100644 --- a/linkerd.io/content/2.15/reference/iptables.md +++ b/linkerd.io/content/2.15/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -kubectl -n logs linkerd-init +$ kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2.16/reference/iptables.md b/linkerd.io/content/2.16/reference/iptables.md index 9b4d229a59..c207bd01d4 100644 --- a/linkerd.io/content/2.16/reference/iptables.md +++ b/linkerd.io/content/2.16/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -kubectl -n logs linkerd-init +$ kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2.17/reference/iptables.md b/linkerd.io/content/2.17/reference/iptables.md index 9b4d229a59..c207bd01d4 100644 --- a/linkerd.io/content/2.17/reference/iptables.md +++ b/linkerd.io/content/2.17/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the 
following command: ```bash -kubectl -n logs linkerd-init +$ kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2.18/reference/iptables.md b/linkerd.io/content/2.18/reference/iptables.md index 9b4d229a59..c207bd01d4 100644 --- a/linkerd.io/content/2.18/reference/iptables.md +++ b/linkerd.io/content/2.18/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -kubectl -n logs linkerd-init +$ kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2.19/reference/iptables.md b/linkerd.io/content/2.19/reference/iptables.md index 9b4d229a59..c207bd01d4 100644 --- a/linkerd.io/content/2.19/reference/iptables.md +++ b/linkerd.io/content/2.19/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -kubectl -n logs linkerd-init +$ kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` From 4d8987c9c394471f2928a938ff6aa5611004bfeb Mon Sep 17 00:00:00 2001 From: beza Date: Sun, 1 Feb 2026 09:56:41 -0300 Subject: [PATCH 24/31] Update linkerd.io/content/2.13/tasks/multicluster-using-statefulsets.md --- .../content/2.13/tasks/multicluster-using-statefulsets.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linkerd.io/content/2.13/tasks/multicluster-using-statefulsets.md b/linkerd.io/content/2.13/tasks/multicluster-using-statefulsets.md index 48fa0f0d27..bf17be188e 100644 --- a/linkerd.io/content/2.13/tasks/multicluster-using-statefulsets.md +++ b/linkerd.io/content/2.13/tasks/multicluster-using-statefulsets.md @@ -179,7 +179,7 @@ nginx-set-2 2/2 Running 0 4m51s curl-56dc7d945d-s4n8j 0/2 PodInitializing 0 4s 
$ kubectl --context=k3d-west exec -it curl-56dc7d945d-s4n8j -c curl -- bin/sh -/$ #prompt for curl pod +/$ # prompt for curl pod ``` If we now curl one of these instances, we will get back a response. From 46dfe3b4c298e6a57833816e2b6f245e9f6677bc Mon Sep 17 00:00:00 2001 From: bezarsnba Date: Sun, 1 Feb 2026 10:23:37 -0300 Subject: [PATCH 25/31] fix upgrade page Signed-off-by: bezarsnba --- linkerd.io/content/2.14/tasks/troubleshooting.md | 2 +- linkerd.io/content/2.16/tasks/upgrade.md | 6 +++--- linkerd.io/content/2.17/tasks/upgrade.md | 12 ++++++------ 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/linkerd.io/content/2.14/tasks/troubleshooting.md b/linkerd.io/content/2.14/tasks/troubleshooting.md index 682c76bd9d..429e168ca3 100644 --- a/linkerd.io/content/2.14/tasks/troubleshooting.md +++ b/linkerd.io/content/2.14/tasks/troubleshooting.md @@ -2114,7 +2114,7 @@ If the Deployment does not exist, the `linkerd-buoyant` installation may be missing or incomplete. To reinstall the extension: ```bash -$ linkerd-buoyant install | kubectl apply -f - +linkerd-buoyant install | kubectl apply -f - ``` ### √ buoyant-cloud-agent Deployment is running diff --git a/linkerd.io/content/2.16/tasks/upgrade.md b/linkerd.io/content/2.16/tasks/upgrade.md index a380c0a56c..54ffdee4f4 100644 --- a/linkerd.io/content/2.16/tasks/upgrade.md +++ b/linkerd.io/content/2.16/tasks/upgrade.md @@ -437,14 +437,14 @@ above. 
```bash # First make sure you update the helm repo -helm repo up +$ helm repo up # Install the linkerd-crds chart -helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds +$ helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds # Install the linkerd-control-plane chart # (remember to add any customizations you retrieved above) -helm install linkerd-control-plane \ +$ helm install linkerd-control-plane \ -n linkerd \ --set-file identityTrustAnchorsPEM=ca.crt \ --set-file identity.issuer.tls.crtPEM=issuer.crt \ diff --git a/linkerd.io/content/2.17/tasks/upgrade.md b/linkerd.io/content/2.17/tasks/upgrade.md index a380c0a56c..38cd784e47 100644 --- a/linkerd.io/content/2.17/tasks/upgrade.md +++ b/linkerd.io/content/2.17/tasks/upgrade.md @@ -437,14 +437,14 @@ above. ```bash # First make sure you update the helm repo -helm repo up +$ helm repo up # Install the linkerd-crds chart -helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds +$ helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds # Install the linkerd-control-plane chart # (remember to add any customizations you retrieved above) -helm install linkerd-control-plane \ +$ helm install linkerd-control-plane \ -n linkerd \ --set-file identityTrustAnchorsPEM=ca.crt \ --set-file identity.issuer.tls.crtPEM=issuer.crt \ @@ -482,14 +482,14 @@ For example, for the viz extension: ```bash # update the helm repo -$ helm repo up +helm repo up # delete your current instance # (assuming you didn't use the -n flag when installing) -$ helm delete linkerd-viz +helm delete linkerd-viz # install the new chart version -$ helm install linkerd-viz -n linkerd-viz --create-namespace linkerd/linkerd-viz +helm install linkerd-viz -n linkerd-viz --create-namespace linkerd/linkerd-viz ``` ##### Upgrading the multicluster extension with Helm From cadd025f83345fc9ae06e8bfe1c6d945a05e5b53 Mon Sep 17 00:00:00 2001 From: bezarsnba Date: Sun, 1 Feb 2026 
10:27:18 -0300 Subject: [PATCH 26/31] revert page upgrade 2.15 and 2.16 Signed-off-by: bezarsnba --- linkerd.io/content/2.15/tasks/upgrade.md | 12 ++++++------ linkerd.io/content/2.16/tasks/upgrade.md | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/linkerd.io/content/2.15/tasks/upgrade.md b/linkerd.io/content/2.15/tasks/upgrade.md index a380c0a56c..38cd784e47 100644 --- a/linkerd.io/content/2.15/tasks/upgrade.md +++ b/linkerd.io/content/2.15/tasks/upgrade.md @@ -437,14 +437,14 @@ above. ```bash # First make sure you update the helm repo -helm repo up +$ helm repo up # Install the linkerd-crds chart -helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds +$ helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds # Install the linkerd-control-plane chart # (remember to add any customizations you retrieved above) -helm install linkerd-control-plane \ +$ helm install linkerd-control-plane \ -n linkerd \ --set-file identityTrustAnchorsPEM=ca.crt \ --set-file identity.issuer.tls.crtPEM=issuer.crt \ @@ -482,14 +482,14 @@ For example, for the viz extension: ```bash # update the helm repo -$ helm repo up +helm repo up # delete your current instance # (assuming you didn't use the -n flag when installing) -$ helm delete linkerd-viz +helm delete linkerd-viz # install the new chart version -$ helm install linkerd-viz -n linkerd-viz --create-namespace linkerd/linkerd-viz +helm install linkerd-viz -n linkerd-viz --create-namespace linkerd/linkerd-viz ``` ##### Upgrading the multicluster extension with Helm diff --git a/linkerd.io/content/2.16/tasks/upgrade.md b/linkerd.io/content/2.16/tasks/upgrade.md index 54ffdee4f4..38cd784e47 100644 --- a/linkerd.io/content/2.16/tasks/upgrade.md +++ b/linkerd.io/content/2.16/tasks/upgrade.md @@ -482,14 +482,14 @@ For example, for the viz extension: ```bash # update the helm repo -$ helm repo up +helm repo up # delete your current instance # (assuming you didn't use the -n 
flag when installing) -$ helm delete linkerd-viz +helm delete linkerd-viz # install the new chart version -$ helm install linkerd-viz -n linkerd-viz --create-namespace linkerd/linkerd-viz +helm install linkerd-viz -n linkerd-viz --create-namespace linkerd/linkerd-viz ``` ##### Upgrading the multicluster extension with Helm From 7267702c42bf70ff8c07c7c82dce35115fc89ef5 Mon Sep 17 00:00:00 2001 From: bezarsnba Date: Wed, 18 Feb 2026 09:48:03 -0300 Subject: [PATCH 27/31] revert config because we are working on another PR Signed-off-by: bezarsnba --- linkerd.io/content/2-edge/reference/iptables.md | 2 +- .../content/2-edge/tasks/managing-egress-traffic.md | 6 +++--- linkerd.io/content/2-edge/tasks/troubleshooting.md | 2 +- linkerd.io/content/2.11/reference/iptables.md | 2 +- linkerd.io/content/2.12/reference/iptables.md | 2 +- linkerd.io/content/2.13/reference/iptables.md | 2 +- linkerd.io/content/2.14/reference/iptables.md | 2 +- linkerd.io/content/2.14/tasks/troubleshooting.md | 2 +- linkerd.io/content/2.15/reference/iptables.md | 2 +- linkerd.io/content/2.15/tasks/troubleshooting.md | 2 +- linkerd.io/content/2.16/reference/iptables.md | 2 +- linkerd.io/content/2.16/tasks/troubleshooting.md | 2 +- linkerd.io/content/2.17/reference/iptables.md | 2 +- linkerd.io/content/2.17/tasks/managing-egress-traffic.md | 8 ++++---- linkerd.io/content/2.17/tasks/troubleshooting.md | 2 +- linkerd.io/content/2.18/reference/iptables.md | 2 +- linkerd.io/content/2.18/tasks/managing-egress-traffic.md | 8 ++++---- linkerd.io/content/2.18/tasks/troubleshooting.md | 2 +- linkerd.io/content/2.19/reference/iptables.md | 2 +- linkerd.io/content/2.19/tasks/managing-egress-traffic.md | 8 ++++---- linkerd.io/content/2.19/tasks/troubleshooting.md | 2 +- 21 files changed, 32 insertions(+), 32 deletions(-) diff --git a/linkerd.io/content/2-edge/reference/iptables.md b/linkerd.io/content/2-edge/reference/iptables.md index c207bd01d4..67a7ea89de 100644 --- a/linkerd.io/content/2-edge/reference/iptables.md
+++ b/linkerd.io/content/2-edge/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -$ kubectl -n logs linkerd-init +$ kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md b/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md index dbd9435a54..e630528157 100644 --- a/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md @@ -235,7 +235,7 @@ Interestingly enough though, if we go back to our client shell and we try to initiate HTTPS traffic to the same service, it will not be allowed: ```bash -$ curl -v https://httpbin.org/get +$ curl -v http://httpbin.org/get curl: (35) TLS connect error: error:00000000:lib(0)::reason(0) ``` @@ -458,7 +458,7 @@ Now let's verify all works as expected: ```bash # plaintext traffic goes as expected to the /get path -$ curl https://httpbin.org/get +$ curl http://httpbin.org/get { "args": {}, "headers": { @@ -479,7 +479,7 @@ $ curl https://httpbin.org/ip # arbitrary unencrypted traffic goes to the internal service -$ curl https://google.com +$ curl http://google.com { "requestUID": "in:http-sid:terminus-grpc:-1-h1:80-190120723", "payload": "You cannot go there right now"} diff --git a/linkerd.io/content/2-edge/tasks/troubleshooting.md b/linkerd.io/content/2-edge/tasks/troubleshooting.md index ac3456634b..0aceee1234 100644 --- a/linkerd.io/content/2-edge/tasks/troubleshooting.md +++ b/linkerd.io/content/2-edge/tasks/troubleshooting.md @@ -1272,7 +1272,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -$ kubectl get po -n linkerd-cni +$ kubectl get po -n linkerd-cn NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s diff --git 
a/linkerd.io/content/2.11/reference/iptables.md b/linkerd.io/content/2.11/reference/iptables.md index c207bd01d4..67a7ea89de 100644 --- a/linkerd.io/content/2.11/reference/iptables.md +++ b/linkerd.io/content/2.11/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -$ kubectl -n logs linkerd-init +$ kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2.12/reference/iptables.md b/linkerd.io/content/2.12/reference/iptables.md index c207bd01d4..67a7ea89de 100644 --- a/linkerd.io/content/2.12/reference/iptables.md +++ b/linkerd.io/content/2.12/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -$ kubectl -n logs linkerd-init +$ kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2.13/reference/iptables.md b/linkerd.io/content/2.13/reference/iptables.md index c207bd01d4..67a7ea89de 100644 --- a/linkerd.io/content/2.13/reference/iptables.md +++ b/linkerd.io/content/2.13/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -$ kubectl -n logs linkerd-init +$ kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2.14/reference/iptables.md b/linkerd.io/content/2.14/reference/iptables.md index c207bd01d4..67a7ea89de 100644 --- a/linkerd.io/content/2.14/reference/iptables.md +++ b/linkerd.io/content/2.14/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the 
following command: ```bash -$ kubectl -n logs linkerd-init +$ kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2.14/tasks/troubleshooting.md b/linkerd.io/content/2.14/tasks/troubleshooting.md index 429e168ca3..7ed6aaf5d1 100644 --- a/linkerd.io/content/2.14/tasks/troubleshooting.md +++ b/linkerd.io/content/2.14/tasks/troubleshooting.md @@ -1243,7 +1243,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -$ kubectl get po -n linkerd-cni +$ kubectl get po -n linkerd-cn NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s diff --git a/linkerd.io/content/2.15/reference/iptables.md b/linkerd.io/content/2.15/reference/iptables.md index c207bd01d4..67a7ea89de 100644 --- a/linkerd.io/content/2.15/reference/iptables.md +++ b/linkerd.io/content/2.15/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -$ kubectl -n logs linkerd-init +$ kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2.15/tasks/troubleshooting.md b/linkerd.io/content/2.15/tasks/troubleshooting.md index c5ede11b02..250d30d24e 100644 --- a/linkerd.io/content/2.15/tasks/troubleshooting.md +++ b/linkerd.io/content/2.15/tasks/troubleshooting.md @@ -1258,7 +1258,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -$ kubectl get po -n linkerd-cni +$ kubectl get po -n linkerd-cn NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s diff --git a/linkerd.io/content/2.16/reference/iptables.md b/linkerd.io/content/2.16/reference/iptables.md index c207bd01d4..67a7ea89de 100644 --- a/linkerd.io/content/2.16/reference/iptables.md +++ b/linkerd.io/content/2.16/reference/iptables.md 
@@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -$ kubectl -n logs linkerd-init +$ kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2.16/tasks/troubleshooting.md b/linkerd.io/content/2.16/tasks/troubleshooting.md index c5ede11b02..250d30d24e 100644 --- a/linkerd.io/content/2.16/tasks/troubleshooting.md +++ b/linkerd.io/content/2.16/tasks/troubleshooting.md @@ -1258,7 +1258,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -$ kubectl get po -n linkerd-cni +$ kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s diff --git a/linkerd.io/content/2.17/reference/iptables.md b/linkerd.io/content/2.17/reference/iptables.md index c207bd01d4..67a7ea89de 100644 --- a/linkerd.io/content/2.17/reference/iptables.md +++ b/linkerd.io/content/2.17/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -$ kubectl -n logs linkerd-init +$ kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2.17/tasks/managing-egress-traffic.md b/linkerd.io/content/2.17/tasks/managing-egress-traffic.md index c3690fde4a..ab34eb783f 100644 --- a/linkerd.io/content/2.17/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2.17/tasks/managing-egress-traffic.md @@ -69,7 +69,7 @@ Now SSH into the client container and start generating some external traffic: ```bash kubectl -n egress-test exec -it client-xxx -c client -- sh -$ while sleep 1; do curl -s https://httpbin.org/get ; done +$ while sleep 1; do curl -s http://httpbin.org/get ; done ``` In a separate shell, you can use the Linkerd diagnostics
command to visualize @@ -190,7 +190,7 @@ Interestingly enough though, if we go back to our client shell and we try to initiate HTTPS traffic to the same service, it will not be allowed: ```bash -$ curl -v https://httpbin.org/get +$ curl -v http://httpbin.org/get curl: (35) TLS connect error: error:00000000:lib(0)::reason(0) ``` @@ -413,7 +413,7 @@ Now let's verify all works as expected: ```bash # plaintext traffic goes as expected to the /get path -$ curl https://httpbin.org/get +$ curl http://httpbin.org/get { "args": {}, "headers": { @@ -434,7 +434,7 @@ $ curl https://httpbin.org/ip # arbitrary unencrypted traffic goes to the internal service -$ curl https://google.com +$ curl http://google.com { "requestUID": "in:http-sid:terminus-grpc:-1-h1:80-190120723", "payload": "You cannot go there right now"} diff --git a/linkerd.io/content/2.17/tasks/troubleshooting.md b/linkerd.io/content/2.17/tasks/troubleshooting.md index e3e6728667..e5ff32e5d9 100644 --- a/linkerd.io/content/2.17/tasks/troubleshooting.md +++ b/linkerd.io/content/2.17/tasks/troubleshooting.md @@ -1272,7 +1272,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -$ kubectl get po -n linkerd-cni +$ kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s diff --git a/linkerd.io/content/2.18/reference/iptables.md b/linkerd.io/content/2.18/reference/iptables.md index c207bd01d4..67a7ea89de 100644 --- a/linkerd.io/content/2.18/reference/iptables.md +++ b/linkerd.io/content/2.18/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -$ kubectl -n logs linkerd-init +$ kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2.18/tasks/managing-egress-traffic.md
b/linkerd.io/content/2.18/tasks/managing-egress-traffic.md index f5c9e97a1f..f24c292e88 100644 --- a/linkerd.io/content/2.18/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2.18/tasks/managing-egress-traffic.md @@ -70,7 +70,7 @@ Now SSH into the client container and start generating some external traffic: ```bash kubectl -n egress-test exec -it client -c client -- sh -$ while sleep 1; do curl -s https://httpbin.org/get ; done +$ while sleep 1; do curl -s http://httpbin.org/get ; done ``` In a separate shell, you can use the Linkerd diagnostics command to visualize @@ -235,7 +235,7 @@ Interestingly enough though, if we go back to our client shell and we try to initiate HTTPS traffic to the same service, it will not be allowed: ```bash -$ curl -v https://httpbin.org/get +$ curl -v http://httpbin.org/get curl: (35) TLS connect error: error:00000000:lib(0)::reason(0) ``` @@ -458,7 +458,7 @@ Now let's verify all works as expected: ```bash # plaintext traffic goes as expected to the /get path -$ curl https://httpbin.org/get +$ curl http://httpbin.org/get { "args": {}, "headers": { @@ -479,7 +479,7 @@ $ curl https://httpbin.org/ip # arbitrary unencrypted traffic goes to the internal service -$ curl https://google.com +$ curl http://google.com { "requestUID": "in:http-sid:terminus-grpc:-1-h1:80-190120723", "payload": "You cannot go there right now"} diff --git a/linkerd.io/content/2.18/tasks/troubleshooting.md b/linkerd.io/content/2.18/tasks/troubleshooting.md index 3d2b7f9e33..dfbc9a5bfe 100644 --- a/linkerd.io/content/2.18/tasks/troubleshooting.md +++ b/linkerd.io/content/2.18/tasks/troubleshooting.md @@ -1272,7 +1272,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -$ kubectl get po -n linkerd-cni +$ kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s diff --git a/linkerd.io/content/2.19/reference/iptables.md
b/linkerd.io/content/2.19/reference/iptables.md index c207bd01d4..67a7ea89de 100644 --- a/linkerd.io/content/2.19/reference/iptables.md +++ b/linkerd.io/content/2.19/reference/iptables.md @@ -164,7 +164,7 @@ Alternatively, if you want to inspect the iptables rules created for a pod, you can retrieve them through the following command: ```bash -$ kubectl -n logs linkerd-init +$ kubectl -n logs linkerd-init # where is the name of the pod # you want to see the iptables rules for ``` diff --git a/linkerd.io/content/2.19/tasks/managing-egress-traffic.md b/linkerd.io/content/2.19/tasks/managing-egress-traffic.md index dc6b5280f0..a7e9d2492e 100644 --- a/linkerd.io/content/2.19/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2.19/tasks/managing-egress-traffic.md @@ -70,7 +70,7 @@ Now SSH into the client container and start generating some external traffic: ```bash kubectl -n egress-test exec -it client -c client -- sh -$ while sleep 1; do curl -s https://httpbin.org/get ; done +$ while sleep 1; do curl -s http://httpbin.org/get ; done ``` In a separate shell, you can use the Linkerd diagnostics command to visualize @@ -235,7 +235,7 @@ Interestingly enough though, if we go back to our client shell and we try to initiate HTTPS traffic to the same service, it will not be allowed: ```bash -$ curl -v https://httpbin.org/get +$ curl -v http://httpbin.org/get curl: (35) TLS connect error: error:00000000:lib(0)::reason(0) ``` @@ -458,7 +458,7 @@ Now let's verify all works as expected: ```bash # plaintext traffic goes as expected to the /get path -$ curl https://httpbin.org/get +$ curl http://httpbin.org/get { "args": {}, "headers": { @@ -479,7 +479,7 @@ $ curl https://httpbin.org/ip # arbitrary unencrypted traffic goes to the internal service -$ curl https://google.com +$ curl http://google.com { "requestUID": "in:http-sid:terminus-grpc:-1-h1:80-190120723", "payload": "You cannot go there right now"} diff --git a/linkerd.io/content/2.19/tasks/troubleshooting.md 
b/linkerd.io/content/2.19/tasks/troubleshooting.md index 6590ab620e..aae02ac2bb 100644 --- a/linkerd.io/content/2.19/tasks/troubleshooting.md +++ b/linkerd.io/content/2.19/tasks/troubleshooting.md @@ -1272,7 +1272,7 @@ Example failure: Ensure that all the CNI pods are running: ```bash -$ kubectl get po -n linkerd-cni +$ kubectl get po -n linkerd-cni NAME READY STATUS RESTARTS AGE linkerd-cni-rzp2q 1/1 Running 0 9m20s linkerd-cni-mf564 1/1 Running 0 9m22s From 000807a4372bbe3b364b39e353cf7fa28904222f Mon Sep 17 00:00:00 2001 From: bezarsnba Date: Wed, 18 Feb 2026 09:54:28 -0300 Subject: [PATCH 28/31] update managing egress Signed-off-by: bezarsnba --- linkerd.io/content/2-edge/tasks/managing-egress-traffic.md | 4 ++-- linkerd.io/content/2.17/tasks/managing-egress-traffic.md | 4 ++-- linkerd.io/content/2.18/tasks/managing-egress-traffic.md | 4 ++-- linkerd.io/content/2.19/tasks/managing-egress-traffic.md | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md b/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md index e630528157..3b93199101 100644 --- a/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md @@ -235,7 +235,7 @@ Interestingly enough though, if we go back to our client shell and we try to initiate HTTPS traffic to the same service, it will not be allowed: ```bash -$ curl -v http://httpbin.org/get +$ curl -v https://httpbin.org/get curl: (35) TLS connect error: error:00000000:lib(0)::reason(0) ``` @@ -479,7 +479,7 @@ $ curl https://httpbin.org/ip # arbitrary unencrypted traffic goes to the internal service -$ curl http://google.com +$ curl https://google.com { "requestUID": "in:http-sid:terminus-grpc:-1-h1:80-190120723", "payload": "You cannot go there right now"} diff --git a/linkerd.io/content/2.17/tasks/managing-egress-traffic.md b/linkerd.io/content/2.17/tasks/managing-egress-traffic.md index
ab34eb783f..d596ef8112 100644 --- a/linkerd.io/content/2.17/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2.17/tasks/managing-egress-traffic.md @@ -190,7 +190,7 @@ Interestingly enough though, if we go back to our client shell and we try to initiate HTTPS traffic to the same service, it will not be allowed: ```bash -$ curl -v http://httpbin.org/get +$ curl -v https://httpbin.org/get curl: (35) TLS connect error: error:00000000:lib(0)::reason(0) ``` @@ -434,7 +434,7 @@ $ curl https://httpbin.org/ip # arbitrary unencrypted traffic goes to the internal service -$ curl http://google.com +$ curl https://google.com { "requestUID": "in:http-sid:terminus-grpc:-1-h1:80-190120723", "payload": "You cannot go there right now"} diff --git a/linkerd.io/content/2.18/tasks/managing-egress-traffic.md b/linkerd.io/content/2.18/tasks/managing-egress-traffic.md index f24c292e88..65ffb0cbc5 100644 --- a/linkerd.io/content/2.18/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2.18/tasks/managing-egress-traffic.md @@ -235,7 +235,7 @@ Interestingly enough though, if we go back to our client shell and we try to initiate HTTPS traffic to the same service, it will not be allowed: ```bash -$ curl -v http://httpbin.org/get +$ curl -v https://httpbin.org/get curl: (35) TLS connect error: error:00000000:lib(0)::reason(0) ``` @@ -479,7 +479,7 @@ $ curl https://httpbin.org/ip # arbitrary unencrypted traffic goes to the internal service -$ curl http://google.com +$ curl https://google.com { "requestUID": "in:http-sid:terminus-grpc:-1-h1:80-190120723", "payload": "You cannot go there right now"} diff --git a/linkerd.io/content/2.19/tasks/managing-egress-traffic.md b/linkerd.io/content/2.19/tasks/managing-egress-traffic.md index a7e9d2492e..3fb62cb5e7 100644 --- a/linkerd.io/content/2.19/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2.19/tasks/managing-egress-traffic.md @@ -235,7 +235,7 @@ Interestingly enough though, if we go back to our client shell and we try to 
initiate HTTPS traffic to the same service, it will not be allowed: ```bash -$ curl -v http://httpbin.org/get +$ curl -v https://httpbin.org/get curl: (35) TLS connect error: error:00000000:lib(0)::reason(0) ``` @@ -479,7 +479,7 @@ $ curl https://httpbin.org/ip # arbitrary unencrypted traffic goes to the internal service -$ curl http://google.com +$ curl https://google.com { "requestUID": "in:http-sid:terminus-grpc:-1-h1:80-190120723", "payload": "You cannot go there right now"} From 8f0d56cae1a610ef56653cd0081942cde9e04071 Mon Sep 17 00:00:00 2001 From: bezarsnba Date: Wed, 18 Feb 2026 09:56:49 -0300 Subject: [PATCH 29/31] revert Signed-off-by: bezarsnba --- linkerd.io/content/2-edge/tasks/managing-egress-traffic.md | 2 +- linkerd.io/content/2.17/tasks/managing-egress-traffic.md | 4 ++-- linkerd.io/content/2.18/tasks/managing-egress-traffic.md | 4 ++-- linkerd.io/content/2.19/tasks/managing-egress-traffic.md | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md b/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md index 3b93199101..6ff4beedb3 100644 --- a/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md @@ -479,7 +479,7 @@ $ curl https://httpbin.org/ip # arbitrary unencrypted traffic goes to the internal service -$ curl https://google.com +$ curl http://google.com { "requestUID": "in:http-sid:terminus-grpc:-1-h1:80-190120723", "payload": "You cannot go there right now"} diff --git a/linkerd.io/content/2.17/tasks/managing-egress-traffic.md b/linkerd.io/content/2.17/tasks/managing-egress-traffic.md index d596ef8112..f66ef76e97 100644 --- a/linkerd.io/content/2.17/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2.17/tasks/managing-egress-traffic.md @@ -413,7 +413,7 @@ Now let's verify all works as expected: ```bash # plaintext traffic goes as expected to the /get path -$ curl http://httpbin.org/get +$ 
curl http://httpbin.org/get { "args": {}, "headers": { @@ -434,7 +434,7 @@ $ curl https://httpbin.org/ip # arbitrary unencrypted traffic goes to the internal service -$ curl https://google.com +$ curl http://google.com { "requestUID": "in:http-sid:terminus-grpc:-1-h1:80-190120723", "payload": "You cannot go there right now"} diff --git a/linkerd.io/content/2.18/tasks/managing-egress-traffic.md b/linkerd.io/content/2.18/tasks/managing-egress-traffic.md index 65ffb0cbc5..a0fa15d956 100644 --- a/linkerd.io/content/2.18/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2.18/tasks/managing-egress-traffic.md @@ -458,7 +458,7 @@ Now let's verify all works as expected: ```bash # plaintext traffic goes as expected to the /get path -$ curl http://httpbin.org/get +$ curl http://httpbin.org/get { "args": {}, "headers": { @@ -479,7 +479,7 @@ $ curl https://httpbin.org/ip # arbitrary unencrypted traffic goes to the internal service -$ curl https://google.com +$ curl http://google.com { "requestUID": "in:http-sid:terminus-grpc:-1-h1:80-190120723", "payload": "You cannot go there right now"} diff --git a/linkerd.io/content/2.19/tasks/managing-egress-traffic.md b/linkerd.io/content/2.19/tasks/managing-egress-traffic.md index 3fb62cb5e7..6da4c84e09 100644 --- a/linkerd.io/content/2.19/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2.19/tasks/managing-egress-traffic.md @@ -458,7 +458,7 @@ Now let's verify all works as expected: ```bash # plaintext traffic goes as expected to the /get path -$ curl http://httpbin.org/get +$ curl http://httpbin.org/get { "args": {}, "headers": { @@ -479,7 +479,7 @@ $ curl https://httpbin.org/ip # arbitrary unencrypted traffic goes to the internal service -$ curl https://google.com +$ curl http://google.com { "requestUID": "in:http-sid:terminus-grpc:-1-h1:80-190120723", "payload": "You cannot go there right now"} From b1c5bac859907631f1b4cbbec96f2237b0f17f54 Mon Sep 17 00:00:00 2001 From: bezarsnba Date: Wed, 18 Feb 2026 09:57:42 -0300 
Subject: [PATCH 30/31] remove white space Signed-off-by: bezarsnba --- linkerd.io/content/2.17/tasks/managing-egress-traffic.md | 2 +- linkerd.io/content/2.18/tasks/managing-egress-traffic.md | 2 +- linkerd.io/content/2.19/tasks/managing-egress-traffic.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/linkerd.io/content/2.17/tasks/managing-egress-traffic.md b/linkerd.io/content/2.17/tasks/managing-egress-traffic.md index f66ef76e97..6571a4dd42 100644 --- a/linkerd.io/content/2.17/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2.17/tasks/managing-egress-traffic.md @@ -413,7 +413,7 @@ Now let's verify all works as expected: ```bash # plaintext traffic goes as expected to the /get path -$ curl http://httpbin.org/get +$ curl http://httpbin.org/get { "args": {}, "headers": { diff --git a/linkerd.io/content/2.18/tasks/managing-egress-traffic.md b/linkerd.io/content/2.18/tasks/managing-egress-traffic.md index a0fa15d956..f828a92edf 100644 --- a/linkerd.io/content/2.18/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2.18/tasks/managing-egress-traffic.md @@ -458,7 +458,7 @@ Now let's verify all works as expected: ```bash # plaintext traffic goes as expected to the /get path -$ curl http://httpbin.org/get +$ curl http://httpbin.org/get { "args": {}, "headers": { diff --git a/linkerd.io/content/2.19/tasks/managing-egress-traffic.md b/linkerd.io/content/2.19/tasks/managing-egress-traffic.md index 6da4c84e09..32e8baee9e 100644 --- a/linkerd.io/content/2.19/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2.19/tasks/managing-egress-traffic.md @@ -458,7 +458,7 @@ Now let's verify all works as expected: ```bash # plaintext traffic goes as expected to the /get path -$ curl http://httpbin.org/get +$ curl http://httpbin.org/get { "args": {}, "headers": { From 2cf1b056c9a705be0ea429cecac4c7a76b8cadd4 Mon Sep 17 00:00:00 2001 From: bezarsnba Date: Wed, 18 Feb 2026 09:59:23 -0300 Subject: [PATCH 31/31] revert Signed-off-by: bezarsnba
--- linkerd.io/content/2-edge/tasks/managing-egress-traffic.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md b/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md index 6ff4beedb3..5db5143577 100644 --- a/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md +++ b/linkerd.io/content/2-edge/tasks/managing-egress-traffic.md @@ -458,7 +458,7 @@ Now let's verify all works as expected: ```bash # plaintext traffic goes as expected to the /get path -$ curl http://httpbin.org/get +$ curl http://httpbin.org/get { "args": {}, "headers": {