Mirror of https://github.com/juanfont/headscale.git (synced 2025-10-28 10:51:44 +01:00)

commit b4a4d0f760
parent 63caf9a222

Handle errors in integration test setups

Thanks @kev-the-dev
Closes #1460
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
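This commit replaces ad-hoc error handling in the integration test setups (inline `if err != nil { t.Errorf(...) }` blocks and `assert.NoError` calls) with shared `assertNoErr*` helpers that abort the test immediately, and moves scenario teardown into `defer scenario.Shutdown()`. The helper bodies are not part of this diff; the following is a minimal sketch of what they plausibly look like, with the names taken from the call sites below and the implementations assumed:

package integration

import "testing"

// assertNoErr fails the test immediately on any unexpected error.
func assertNoErr(t *testing.T, err error) {
	t.Helper()
	assertNoErrf(t, "unexpected error: %s", err)
}

// assertNoErrf is the formatted variant, e.g.
// assertNoErrf(t, "failed to run mock OIDC server: %s", err).
func assertNoErrf(t *testing.T, msg string, err error) {
	t.Helper()
	if err != nil {
		t.Fatalf(msg, err)
	}
}

// The specialised wrappers seen in the diffs (assertNoErrHeadscaleEnv,
// assertNoErrListClients, assertNoErrListClientIPs, assertNoErrListFQDN,
// assertNoErrSync, assertNoErrLogout, assertNoErrGetHeadscale) would each
// call assertNoErrf with a fixed message, for example:
func assertNoErrSync(t *testing.T, err error) {
	t.Helper()
	assertNoErrf(t, "failed to have all clients sync up: %s", err)
}

Using t.Fatalf instead of t.Errorf means a broken setup stops the test at once rather than letting it continue against a half-built environment, which is the main behavioural change across the files below.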
@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestACLAllowStarDst:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestACLAllowStarDst
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestACLAllowUser80Dst:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestACLAllowUser80Dst
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestACLAllowUserDst:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestACLAllowUserDst
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestACLDenyAllPort80:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestACLDenyAllPort80
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestACLDevice1CanAccessDevice2:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestACLDevice1CanAccessDevice2
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestACLHostsInNetMapTable:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestACLHostsInNetMapTable
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestACLNamedHostsCanReach:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestACLNamedHostsCanReach
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestACLNamedHostsCanReachBySubnet:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestACLNamedHostsCanReachBySubnet
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestApiKeyCommand:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestApiKeyCommand
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestAuthKeyLogoutAndRelogin:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestAuthKeyLogoutAndRelogin
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestAuthWebFlowAuthenticationPingAll:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestAuthWebFlowAuthenticationPingAll
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestAuthWebFlowLogoutAndRelogin:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestAuthWebFlowLogoutAndRelogin
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestCreateTailscale:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestCreateTailscale
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestDERPServerScenario:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestDERPServerScenario
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestEnablingRoutes:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestEnablingRoutes
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestEphemeral:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestEphemeral
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestExpireNode:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestExpireNode
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestHeadscale:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestHeadscale
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestNodeCommand:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestNodeCommand
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestNodeExpireCommand:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestNodeExpireCommand
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestNodeMoveCommand:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestNodeMoveCommand
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestNodeRenameCommand:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestNodeRenameCommand
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestNodeTagCommand:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestNodeTagCommand
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestOIDCAuthenticationPingAll:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestOIDCAuthenticationPingAll
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestOIDCExpireNodesBasedOnTokenExpiry:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestOIDCExpireNodesBasedOnTokenExpiry
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestPingAllByHostname:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestPingAllByHostname
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestPingAllByIP:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestPingAllByIP
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestPreAuthKeyCommand:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestPreAuthKeyCommand
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestPreAuthKeyCommandReusableEphemeral:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestPreAuthKeyCommandReusableEphemeral
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestPreAuthKeyCommandWithoutExpiry:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestPreAuthKeyCommandWithoutExpiry
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestResolveMagicDNS:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestResolveMagicDNS
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestSSHIsBlockedInACL:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestSSHIsBlockedInACL
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestSSHMultipleUsersAllToAll:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestSSHMultipleUsersAllToAll
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestSSHNoSSHConfigured:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestSSHNoSSHConfigured
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestSSHOneUserAllToAll:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestSSHOneUserAllToAll
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestSSUserOnlyIsolation:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestSSUserOnlyIsolation
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestTaildrop:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestTaildrop
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestTailscaleNodesJoiningHeadcale:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestTailscaleNodesJoiningHeadcale
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \

@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  TestUserCommand:
     runs-on: ubuntu-latest

     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run TestUserCommand
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \
@@ -1,16 +0,0 @@
-FROM ubuntu:22.04
-
-ARG TAILSCALE_VERSION=*
-ARG TAILSCALE_CHANNEL=stable
-
-RUN apt-get update \
-    && apt-get install -y gnupg curl ssh dnsutils ca-certificates \
-    && adduser --shell=/bin/bash ssh-it-user
-
-# Tailscale is deliberately split into a second stage so we can cash utils as a seperate layer.
-RUN curl -fsSL https://pkgs.tailscale.com/${TAILSCALE_CHANNEL}/ubuntu/focal.gpg | apt-key add - \
-    && curl -fsSL https://pkgs.tailscale.com/${TAILSCALE_CHANNEL}/ubuntu/focal.list | tee /etc/apt/sources.list.d/tailscale.list \
-    && apt-get update \
-    && apt-get install -y tailscale=${TAILSCALE_VERSION} \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
@@ -31,7 +31,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test:
+  {{.Name}}:
     runs-on: ubuntu-latest

     steps:
@@ -55,7 +55,7 @@ jobs:
             integration_test/
             config-example.yaml

-      - name: Run general integration tests
+      - name: Run {{.Name}}
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
             nix develop --command -- docker run \
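The `{{.Name}}` placeholders in this last hunk are Go text/template directives, which suggests the per-test workflow files above are generated from a single template rather than edited by hand. A minimal sketch of such a generator follows; the template path, output naming scheme, and hard-coded test list are illustrative assumptions, not part of this commit:

package main

import (
	"fmt"
	"os"
	"text/template"
)

func main() {
	// Hypothetical path to the templated workflow whose diff is shown above.
	tmpl := template.Must(
		template.ParseFiles(".github/workflows/test-integration.tmpl.yaml"),
	)

	// In practice the names could be discovered with `go test -list ./...`.
	tests := []string{"TestACLAllowStarDst", "TestPingAllByIP", "TestUserCommand"}

	for _, name := range tests {
		out, err := os.Create(
			fmt.Sprintf(".github/workflows/test-integration-v2-%s.yaml", name),
		)
		if err != nil {
			panic(err)
		}
		// Each execution fills {{.Name}}, producing one workflow per test.
		if err := tmpl.Execute(out, struct{ Name string }{Name: name}); err != nil {
			panic(err)
		}
		out.Close()
	}
}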
@@ -45,10 +45,14 @@ var veryLargeDestination = []string{
 	"208.0.0.0/4:*",
 }

-func aclScenario(t *testing.T, policy *policy.ACLPolicy, clientsPerUser int) *Scenario {
+func aclScenario(
+	t *testing.T,
+	policy *policy.ACLPolicy,
+	clientsPerUser int,
+) *Scenario {
 	t.Helper()
 	scenario, err := NewScenario()
-	assert.NoError(t, err)
+	assertNoErr(t, err)

 	spec := map[string]int{
 		"user1": clientsPerUser,
@@ -58,22 +62,19 @@ func aclScenario(t *testing.T, policy *policy.ACLPolicy, clientsPerUser int) *Scenario {
 	err = scenario.CreateHeadscaleEnv(spec,
 		[]tsic.Option{
 			tsic.WithDockerEntrypoint([]string{
-				"/bin/bash",
+				"/bin/sh",
 				"-c",
-				"/bin/sleep 3 ; update-ca-certificates ; python3 -m http.server --bind :: 80 & tailscaled --tun=tsdev",
+				"/bin/sleep 3 ; apk add python3 curl ; update-ca-certificates ; python3 -m http.server --bind :: 80 & tailscaled --tun=tsdev",
 			}),
 			tsic.WithDockerWorkdir("/"),
 		},
 		hsic.WithACLPolicy(policy),
 		hsic.WithTestName("acl"),
 	)
-	assert.NoError(t, err)
-
 	err = scenario.WaitForTailscaleSync()
-	assert.NoError(t, err)
+	assertNoErr(t, err)

 	_, err = scenario.ListTailscaleClientsFQDNs()
-	assert.NoError(t, err)
-
+	assertNoErrListFQDN(t, err)
 	return scenario
 }
@@ -260,7 +261,7 @@ func TestACLHostsInNetMapTable(t *testing.T) {
 	for name, testCase := range tests {
 		t.Run(name, func(t *testing.T) {
 			scenario, err := NewScenario()
-			assert.NoError(t, err)
+			assertNoErr(t, err)

 			spec := testCase.users

@@ -268,25 +269,23 @@ func TestACLHostsInNetMapTable(t *testing.T) {
 				[]tsic.Option{},
 				hsic.WithACLPolicy(&testCase.policy),
 			)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
+			defer scenario.Shutdown()

 			allClients, err := scenario.ListTailscaleClients()
-			assert.NoError(t, err)
+			assertNoErr(t, err)

-			err = scenario.WaitForTailscaleSync()
-			assert.NoError(t, err)
+			err = scenario.WaitForTailscaleSyncWithPeerCount(testCase.want["user1"])
+			assertNoErrSync(t, err)

 			for _, client := range allClients {
 				status, err := client.Status()
-				assert.NoError(t, err)
+				assertNoErr(t, err)

 				user := status.User[status.Self.UserID].LoginName

 				assert.Equal(t, (testCase.want[user]), len(status.Peer))
 			}
-
-			err = scenario.Shutdown()
-			assert.NoError(t, err)
 		})
 	}
 }
@@ -311,25 +310,26 @@ func TestACLAllowUser80Dst(t *testing.T) {
 		},
 		1,
 	)
+	defer scenario.Shutdown()

 	user1Clients, err := scenario.ListTailscaleClients("user1")
-	assert.NoError(t, err)
+	assertNoErr(t, err)

 	user2Clients, err := scenario.ListTailscaleClients("user2")
-	assert.NoError(t, err)
+	assertNoErr(t, err)

 	// Test that user1 can visit all user2
 	for _, client := range user1Clients {
 		for _, peer := range user2Clients {
 			fqdn, err := peer.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)

 			url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
 			t.Logf("url from %s to %s", client.Hostname(), url)

 			result, err := client.Curl(url)
 			assert.Len(t, result, 13)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 		}
 	}

@@ -337,7 +337,7 @@ func TestACLAllowUser80Dst(t *testing.T) {
 	for _, client := range user2Clients {
 		for _, peer := range user1Clients {
 			fqdn, err := peer.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)

 			url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
 			t.Logf("url from %s to %s", client.Hostname(), url)
@@ -347,9 +347,6 @@ func TestACLAllowUser80Dst(t *testing.T) {
 			assert.Error(t, err)
 		}
 	}
-
-	err = scenario.Shutdown()
-	assert.NoError(t, err)
 }

 func TestACLDenyAllPort80(t *testing.T) {
@@ -370,12 +367,13 @@ func TestACLDenyAllPort80(t *testing.T) {
 		},
 		4,
 	)
+	defer scenario.Shutdown()

 	allClients, err := scenario.ListTailscaleClients()
-	assert.NoError(t, err)
+	assertNoErr(t, err)

 	allHostnames, err := scenario.ListTailscaleClientsFQDNs()
-	assert.NoError(t, err)
+	assertNoErr(t, err)

 	for _, client := range allClients {
 		for _, hostname := range allHostnames {
@@ -393,9 +391,6 @@ func TestACLDenyAllPort80(t *testing.T) {
 			assert.Error(t, err)
 		}
 	}
-
-	err = scenario.Shutdown()
-	assert.NoError(t, err)
 }

 // Test to confirm that we can use user:* from one user.
@@ -416,25 +411,26 @@ func TestACLAllowUserDst(t *testing.T) {
 		},
 		2,
 	)
+	defer scenario.Shutdown()

 	user1Clients, err := scenario.ListTailscaleClients("user1")
-	assert.NoError(t, err)
+	assertNoErr(t, err)

 	user2Clients, err := scenario.ListTailscaleClients("user2")
-	assert.NoError(t, err)
+	assertNoErr(t, err)

 	// Test that user1 can visit all user2
 	for _, client := range user1Clients {
 		for _, peer := range user2Clients {
 			fqdn, err := peer.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)

 			url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
 			t.Logf("url from %s to %s", client.Hostname(), url)

 			result, err := client.Curl(url)
 			assert.Len(t, result, 13)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 		}
 	}

@@ -442,7 +438,7 @@ func TestACLAllowUserDst(t *testing.T) {
 	for _, client := range user2Clients {
 		for _, peer := range user1Clients {
 			fqdn, err := peer.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)

 			url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
 			t.Logf("url from %s to %s", client.Hostname(), url)
@@ -452,9 +448,6 @@ func TestACLAllowUserDst(t *testing.T) {
 			assert.Error(t, err)
 		}
 	}
-
-	err = scenario.Shutdown()
-	assert.NoError(t, err)
 }

 // Test to confirm that we can use *:* from one user
@@ -474,25 +467,26 @@ func TestACLAllowStarDst(t *testing.T) {
 		},
 		2,
 	)
+	defer scenario.Shutdown()

 	user1Clients, err := scenario.ListTailscaleClients("user1")
-	assert.NoError(t, err)
+	assertNoErr(t, err)

 	user2Clients, err := scenario.ListTailscaleClients("user2")
-	assert.NoError(t, err)
+	assertNoErr(t, err)

 	// Test that user1 can visit all user2
 	for _, client := range user1Clients {
 		for _, peer := range user2Clients {
 			fqdn, err := peer.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)

 			url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
 			t.Logf("url from %s to %s", client.Hostname(), url)

 			result, err := client.Curl(url)
 			assert.Len(t, result, 13)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 		}
 	}

@@ -500,7 +494,7 @@ func TestACLAllowStarDst(t *testing.T) {
 	for _, client := range user2Clients {
 		for _, peer := range user1Clients {
 			fqdn, err := peer.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)

 			url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
 			t.Logf("url from %s to %s", client.Hostname(), url)
@@ -510,9 +504,6 @@ func TestACLAllowStarDst(t *testing.T) {
 			assert.Error(t, err)
 		}
 	}
-
-	err = scenario.Shutdown()
-	assert.NoError(t, err)
 }

 // TestACLNamedHostsCanReachBySubnet is the same as
@@ -537,25 +528,26 @@ func TestACLNamedHostsCanReachBySubnet(t *testing.T) {
 		},
 		3,
 	)
+	defer scenario.Shutdown()

 	user1Clients, err := scenario.ListTailscaleClients("user1")
-	assert.NoError(t, err)
+	assertNoErr(t, err)

 	user2Clients, err := scenario.ListTailscaleClients("user2")
-	assert.NoError(t, err)
+	assertNoErr(t, err)

 	// Test that user1 can visit all user2
 	for _, client := range user1Clients {
 		for _, peer := range user2Clients {
 			fqdn, err := peer.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)

 			url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
 			t.Logf("url from %s to %s", client.Hostname(), url)

 			result, err := client.Curl(url)
 			assert.Len(t, result, 13)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 		}
 	}

@@ -563,19 +555,16 @@ func TestACLNamedHostsCanReachBySubnet(t *testing.T) {
 	for _, client := range user2Clients {
 		for _, peer := range user1Clients {
 			fqdn, err := peer.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)

 			url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
 			t.Logf("url from %s to %s", client.Hostname(), url)

 			result, err := client.Curl(url)
 			assert.Len(t, result, 13)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 		}
 	}
-
-	err = scenario.Shutdown()
-	assert.NoError(t, err)
 }

 // This test aims to cover cases where individual hosts are allowed and denied
@@ -677,16 +666,17 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 				&testCase.policy,
 				2,
 			)
+			defer scenario.Shutdown()

 			// Since user/users dont matter here, we basically expect that some clients
 			// will be assigned these ips and that we can pick them up for our own use.
 			test1ip4 := netip.MustParseAddr("100.64.0.1")
 			test1ip6 := netip.MustParseAddr("fd7a:115c:a1e0::1")
 			test1, err := scenario.FindTailscaleClientByIP(test1ip6)
-			assert.NoError(t, err)
+			assertNoErr(t, err)

 			test1fqdn, err := test1.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 			test1ip4URL := fmt.Sprintf("http://%s/etc/hostname", test1ip4.String())
 			test1ip6URL := fmt.Sprintf("http://[%s]/etc/hostname", test1ip6.String())
 			test1fqdnURL := fmt.Sprintf("http://%s/etc/hostname", test1fqdn)
@@ -694,10 +684,10 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 			test2ip4 := netip.MustParseAddr("100.64.0.2")
 			test2ip6 := netip.MustParseAddr("fd7a:115c:a1e0::2")
 			test2, err := scenario.FindTailscaleClientByIP(test2ip6)
-			assert.NoError(t, err)
+			assertNoErr(t, err)

 			test2fqdn, err := test2.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 			test2ip4URL := fmt.Sprintf("http://%s/etc/hostname", test2ip4.String())
 			test2ip6URL := fmt.Sprintf("http://[%s]/etc/hostname", test2ip6.String())
 			test2fqdnURL := fmt.Sprintf("http://%s/etc/hostname", test2fqdn)
@@ -705,10 +695,10 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 			test3ip4 := netip.MustParseAddr("100.64.0.3")
 			test3ip6 := netip.MustParseAddr("fd7a:115c:a1e0::3")
 			test3, err := scenario.FindTailscaleClientByIP(test3ip6)
-			assert.NoError(t, err)
+			assertNoErr(t, err)

 			test3fqdn, err := test3.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 			test3ip4URL := fmt.Sprintf("http://%s/etc/hostname", test3ip4.String())
 			test3ip6URL := fmt.Sprintf("http://[%s]/etc/hostname", test3ip6.String())
 			test3fqdnURL := fmt.Sprintf("http://%s/etc/hostname", test3fqdn)
@@ -723,7 +713,7 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 				test3ip4URL,
 				result,
 			)
-			assert.NoError(t, err)
+			assertNoErr(t, err)

 			result, err = test1.Curl(test3ip6URL)
 			assert.Lenf(
@@ -734,7 +724,7 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 				test3ip6URL,
 				result,
 			)
-			assert.NoError(t, err)
+			assertNoErr(t, err)

 			result, err = test1.Curl(test3fqdnURL)
 			assert.Lenf(
@@ -745,7 +735,7 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 				test3fqdnURL,
 				result,
 			)
-			assert.NoError(t, err)
+			assertNoErr(t, err)

 			// test2 can query test3
 			result, err = test2.Curl(test3ip4URL)
@@ -757,7 +747,7 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 				test3ip4URL,
 				result,
 			)
-			assert.NoError(t, err)
+			assertNoErr(t, err)

 			result, err = test2.Curl(test3ip6URL)
 			assert.Lenf(
@@ -768,7 +758,7 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 				test3ip6URL,
 				result,
 			)
-			assert.NoError(t, err)
+			assertNoErr(t, err)

 			result, err = test2.Curl(test3fqdnURL)
 			assert.Lenf(
@@ -779,7 +769,7 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 				test3fqdnURL,
 				result,
 			)
-			assert.NoError(t, err)
+			assertNoErr(t, err)

 			// test3 cannot query test1
 			result, err = test3.Curl(test1ip4URL)
@@ -818,7 +808,7 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 				result,
 			)

-			assert.NoError(t, err)
+			assertNoErr(t, err)
 			result, err = test1.Curl(test2ip6URL)
 			assert.Lenf(
 				t,
@@ -828,7 +818,7 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 				test2ip6URL,
 				result,
 			)
-			assert.NoError(t, err)
+			assertNoErr(t, err)

 			result, err = test1.Curl(test2fqdnURL)
 			assert.Lenf(
@@ -839,7 +829,7 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 				test2fqdnURL,
 				result,
 			)
-			assert.NoError(t, err)
+			assertNoErr(t, err)

 			// test2 cannot query test1
 			result, err = test2.Curl(test1ip4URL)
@@ -853,9 +843,6 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 			result, err = test2.Curl(test1fqdnURL)
 			assert.Empty(t, result)
 			assert.Error(t, err)
-
-			err = scenario.Shutdown()
-			assert.NoError(t, err)
 		})
 	}
 }
@@ -953,10 +940,10 @@ func TestACLDevice1CanAccessDevice2(t *testing.T) {
 			test1ip6 := netip.MustParseAddr("fd7a:115c:a1e0::1")
 			test1, err := scenario.FindTailscaleClientByIP(test1ip)
 			assert.NotNil(t, test1)
-			assert.NoError(t, err)
+			assertNoErr(t, err)

 			test1fqdn, err := test1.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 			test1ipURL := fmt.Sprintf("http://%s/etc/hostname", test1ip.String())
 			test1ip6URL := fmt.Sprintf("http://[%s]/etc/hostname", test1ip6.String())
 			test1fqdnURL := fmt.Sprintf("http://%s/etc/hostname", test1fqdn)
@@ -965,10 +952,10 @@ func TestACLDevice1CanAccessDevice2(t *testing.T) {
 			test2ip6 := netip.MustParseAddr("fd7a:115c:a1e0::2")
 			test2, err := scenario.FindTailscaleClientByIP(test2ip)
 			assert.NotNil(t, test2)
-			assert.NoError(t, err)
+			assertNoErr(t, err)

 			test2fqdn, err := test2.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 			test2ipURL := fmt.Sprintf("http://%s/etc/hostname", test2ip.String())
 			test2ip6URL := fmt.Sprintf("http://[%s]/etc/hostname", test2ip6.String())
 			test2fqdnURL := fmt.Sprintf("http://%s/etc/hostname", test2fqdn)
@@ -983,7 +970,7 @@ func TestACLDevice1CanAccessDevice2(t *testing.T) {
 				test2ipURL,
 				result,
 			)
-			assert.NoError(t, err)
+			assertNoErr(t, err)

 			result, err = test1.Curl(test2ip6URL)
 			assert.Lenf(
@@ -994,7 +981,7 @@ func TestACLDevice1CanAccessDevice2(t *testing.T) {
 				test2ip6URL,
 				result,
 			)
-			assert.NoError(t, err)
+			assertNoErr(t, err)

 			result, err = test1.Curl(test2fqdnURL)
 			assert.Lenf(
@@ -1005,7 +992,7 @@ func TestACLDevice1CanAccessDevice2(t *testing.T) {
 				test2fqdnURL,
 				result,
 			)
-			assert.NoError(t, err)
+			assertNoErr(t, err)

 			result, err = test2.Curl(test1ipURL)
 			assert.Empty(t, result)
@@ -1018,9 +1005,6 @@ func TestACLDevice1CanAccessDevice2(t *testing.T) {
 			result, err = test2.Curl(test1fqdnURL)
 			assert.Empty(t, result)
 			assert.Error(t, err)
-
-			err = scenario.Shutdown()
-			assert.NoError(t, err)
 		})
 	}
 }
@@ -42,22 +42,19 @@ func TestOIDCAuthenticationPingAll(t *testing.T) {
 	t.Parallel()

 	baseScenario, err := NewScenario()
-	if err != nil {
-		t.Errorf("failed to create scenario: %s", err)
-	}
+	assertNoErr(t, err)

 	scenario := AuthOIDCScenario{
 		Scenario: baseScenario,
 	}
+	defer scenario.Shutdown()

 	spec := map[string]int{
 		"user1": len(TailscaleVersions),
 	}

 	oidcConfig, err := scenario.runMockOIDC(defaultAccessTTL)
-	if err != nil {
-		t.Errorf("failed to run mock OIDC server: %s", err)
-	}
+	assertNoErrf(t, "failed to run mock OIDC server: %s", err)

 	oidcMap := map[string]string{
 		"HEADSCALE_OIDC_ISSUER":             oidcConfig.Issuer,
@@ -74,24 +71,16 @@ func TestOIDCAuthenticationPingAll(t *testing.T) {
 		hsic.WithHostnameAsServerURL(),
 		hsic.WithFileInContainer("/tmp/hs_client_oidc_secret", []byte(oidcConfig.ClientSecret)),
 	)
-	if err != nil {
-		t.Errorf("failed to create headscale environment: %s", err)
-	}
+	assertNoErrHeadscaleEnv(t, err)

 	allClients, err := scenario.ListTailscaleClients()
-	if err != nil {
-		t.Errorf("failed to get clients: %s", err)
-	}
+	assertNoErrListClients(t, err)

 	allIps, err := scenario.ListTailscaleClientsIPs()
-	if err != nil {
-		t.Errorf("failed to get clients: %s", err)
-	}
+	assertNoErrListClientIPs(t, err)

 	err = scenario.WaitForTailscaleSync()
-	if err != nil {
-		t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
-	}
+	assertNoErrSync(t, err)

 	allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
 		return x.String()
@@ -99,11 +88,6 @@ func TestOIDCAuthenticationPingAll(t *testing.T) {

 	success := pingAllHelper(t, allClients, allAddrs)
 	t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
-
-	err = scenario.Shutdown()
-	if err != nil {
-		t.Errorf("failed to tear down scenario: %s", err)
-	}
 }

 func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) {
@@ -113,22 +97,19 @@ func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) {
 	shortAccessTTL := 5 * time.Minute

 	baseScenario, err := NewScenario()
-	if err != nil {
-		t.Errorf("failed to create scenario: %s", err)
-	}
+	assertNoErr(t, err)

 	scenario := AuthOIDCScenario{
 		Scenario: baseScenario,
 	}
+	defer scenario.Shutdown()

 	spec := map[string]int{
 		"user1": len(TailscaleVersions),
 	}

 	oidcConfig, err := scenario.runMockOIDC(shortAccessTTL)
-	if err != nil {
-		t.Fatalf("failed to run mock OIDC server: %s", err)
-	}
+	assertNoErrf(t, "failed to run mock OIDC server: %s", err)

 	oidcMap := map[string]string{
 		"HEADSCALE_OIDC_ISSUER":                oidcConfig.Issuer,
@@ -144,24 +125,16 @@ func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) {
 		hsic.WithConfigEnv(oidcMap),
 		hsic.WithHostnameAsServerURL(),
 	)
-	if err != nil {
-		t.Errorf("failed to create headscale environment: %s", err)
-	}
+	assertNoErrHeadscaleEnv(t, err)

 	allClients, err := scenario.ListTailscaleClients()
-	if err != nil {
-		t.Errorf("failed to get clients: %s", err)
-	}
+	assertNoErrListClients(t, err)

 	allIps, err := scenario.ListTailscaleClientsIPs()
-	if err != nil {
-		t.Errorf("failed to get clients: %s", err)
-	}
+	assertNoErrListClientIPs(t, err)

 	err = scenario.WaitForTailscaleSync()
-	if err != nil {
-		t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
-	}
+	assertNoErrSync(t, err)

 	allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
 		return x.String()
@@ -171,12 +144,8 @@ func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) {
 	t.Logf("%d successful pings out of %d (before expiry)", success, len(allClients)*len(allIps))

 	// await all nodes being logged out after OIDC token expiry
-	scenario.WaitForTailscaleLogout()
-
-	err = scenario.Shutdown()
-	if err != nil {
-		t.Errorf("failed to tear down scenario: %s", err)
-	}
+	err = scenario.WaitForTailscaleLogout()
+	assertNoErrLogout(t, err)
 }

 func (s *AuthOIDCScenario) CreateHeadscaleEnv(
@@ -188,7 +157,7 @@ func (s *AuthOIDCScenario) CreateHeadscaleEnv(
 		return err
 	}

-	err = headscale.WaitForReady()
+	err = headscale.WaitForRunning()
 	if err != nil {
 		return err
 	}
@@ -311,15 +280,11 @@ func (s *AuthOIDCScenario) runTailscaleUp(
 	log.Printf("running tailscale up for user %s", userStr)
 	if user, ok := s.users[userStr]; ok {
 		for _, client := range user.Clients {
-			user.joinWaitGroup.Add(1)
-
-			go func(c TailscaleClient) {
-				defer user.joinWaitGroup.Done()
-
-				// TODO(juanfont): error handle this
-				loginURL, err := c.UpWithLoginURL(loginServer)
+			c := client
+			user.joinWaitGroup.Go(func() error {
+				loginURL, err := c.LoginWithURL(loginServer)
 				if err != nil {
-					log.Printf("failed to run tailscale up: %s", err)
+					log.Printf("%s failed to run tailscale up: %s", c.Hostname(), err)
 				}

 				loginURL.Host = fmt.Sprintf("%s:8080", headscale.GetIP())
@@ -336,9 +301,14 @@ func (s *AuthOIDCScenario) runTailscaleUp(
 				req, _ := http.NewRequestWithContext(ctx, http.MethodGet, loginURL.String(), nil)
 				resp, err := httpClient.Do(req)
 				if err != nil {
-					log.Printf("%s failed to get login url %s: %s", c.Hostname(), loginURL, err)
+					log.Printf(
+						"%s failed to get login url %s: %s",
+						c.Hostname(),
+						loginURL,
+						err,
+					)

-					return
+					return err
 				}

 				defer resp.Body.Close()
@@ -347,28 +317,29 @@ func (s *AuthOIDCScenario) runTailscaleUp(
 				if err != nil {
 					log.Printf("%s failed to read response body: %s", c.Hostname(), err)

-					return
+					return err
 				}

 				log.Printf("Finished request for %s to join tailnet", c.Hostname())
-			}(client)

-			err = client.WaitForReady()
-			if err != nil {
-				log.Printf("error waiting for client %s to be ready: %s", client.Hostname(), err)
-			}
+				return nil
+			})

 			log.Printf("client %s is ready", client.Hostname())
 		}

-		user.joinWaitGroup.Wait()
+		if err := user.joinWaitGroup.Wait(); err != nil {
+			return err
+		}

 		for _, client := range user.Clients {
-			err := client.WaitForReady()
+			err := client.WaitForRunning()
 			if err != nil {
-				log.Printf("client %s was not ready: %s", client.Hostname(), err)
-
-				return fmt.Errorf("failed to up tailscale node: %w", err)
+				return fmt.Errorf(
+					"%s tailscale node has not reached running: %w",
+					client.Hostname(),
+					err,
+				)
 			}
 		}

@@ -378,11 +349,11 @@ func (s *AuthOIDCScenario) runTailscaleUp(
 	return fmt.Errorf("failed to up tailscale node: %w", errNoUserAvailable)
 }

-func (s *AuthOIDCScenario) Shutdown() error {
+func (s *AuthOIDCScenario) Shutdown() {
 	err := s.pool.Purge(s.mockOIDC)
 	if err != nil {
-		return err
+		log.Printf("failed to remove mock oidc container")
 	}

-	return s.Scenario.Shutdown()
+	s.Scenario.Shutdown()
 }
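Note the shape of the `joinWaitGroup` change above: `Add(1)`/`Done()` with fire-and-forget goroutines becomes `joinWaitGroup.Go(func() error ...)` with a `Wait()` that returns an error, which matches the `golang.org/x/sync/errgroup` API. A standalone sketch of that pattern, with the scenario types simplified away:

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	var group errgroup.Group

	clients := []string{"ts-client-1", "ts-client-2"}
	for _, client := range clients {
		c := client // capture the loop variable for the closure (pre-Go 1.22)
		group.Go(func() error {
			// Returning the error, instead of only logging it inside the
			// goroutine, lets the caller fail the whole setup.
			if c == "" {
				return fmt.Errorf("no client to log in")
			}
			fmt.Printf("%s logged in\n", c)

			return nil
		})
	}

	// Wait blocks until all goroutines finish and returns the first error.
	if err := group.Wait(); err != nil {
		fmt.Println("setup failed:", err)
	}
}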
| @ -28,12 +28,13 @@ func TestAuthWebFlowAuthenticationPingAll(t *testing.T) { | ||||
| 
 | ||||
| 	baseScenario, err := NewScenario() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create scenario: %s", err) | ||||
| 		t.Fatalf("failed to create scenario: %s", err) | ||||
| 	} | ||||
| 
 | ||||
| 	scenario := AuthWebFlowScenario{ | ||||
| 		Scenario: baseScenario, | ||||
| 	} | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		"user1": len(TailscaleVersions), | ||||
| @ -41,24 +42,16 @@ func TestAuthWebFlowAuthenticationPingAll(t *testing.T) { | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.CreateHeadscaleEnv(spec, hsic.WithTestName("webauthping")) | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create headscale environment: %s", err) | ||||
| 	} | ||||
| 	assertNoErrHeadscaleEnv(t, err) | ||||
| 
 | ||||
| 	allClients, err := scenario.ListTailscaleClients() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClients(t, err) | ||||
| 
 | ||||
| 	allIps, err := scenario.ListTailscaleClientsIPs() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClientIPs(t, err) | ||||
| 
 | ||||
| 	err = scenario.WaitForTailscaleSync() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed wait for tailscale clients to be in sync: %s", err) | ||||
| 	} | ||||
| 	assertNoErrSync(t, err) | ||||
| 
 | ||||
| 	allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string { | ||||
| 		return x.String() | ||||
| @ -66,11 +59,6 @@ func TestAuthWebFlowAuthenticationPingAll(t *testing.T) { | ||||
| 
 | ||||
| 	success := pingAllHelper(t, allClients, allAddrs) | ||||
| 	t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps)) | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to tear down scenario: %s", err) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func TestAuthWebFlowLogoutAndRelogin(t *testing.T) { | ||||
| @ -78,13 +66,12 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) { | ||||
| 	t.Parallel() | ||||
| 
 | ||||
| 	baseScenario, err := NewScenario() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create scenario: %s", err) | ||||
| 	} | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	scenario := AuthWebFlowScenario{ | ||||
| 		Scenario: baseScenario, | ||||
| 	} | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		"user1": len(TailscaleVersions), | ||||
| @ -92,24 +79,16 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) { | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.CreateHeadscaleEnv(spec, hsic.WithTestName("weblogout")) | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create headscale environment: %s", err) | ||||
| 	} | ||||
| 	assertNoErrHeadscaleEnv(t, err) | ||||
| 
 | ||||
| 	allClients, err := scenario.ListTailscaleClients() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClients(t, err) | ||||
| 
 | ||||
| 	allIps, err := scenario.ListTailscaleClientsIPs() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClientIPs(t, err) | ||||
| 
 | ||||
| 	err = scenario.WaitForTailscaleSync() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed wait for tailscale clients to be in sync: %s", err) | ||||
| 	} | ||||
| 	assertNoErrSync(t, err) | ||||
| 
 | ||||
| 	allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string { | ||||
| 		return x.String() | ||||
| @ -122,7 +101,7 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) { | ||||
| 	for _, client := range allClients { | ||||
| 		ips, err := client.IPs() | ||||
| 		if err != nil { | ||||
| 			t.Errorf("failed to get IPs for client %s: %s", client.Hostname(), err) | ||||
| 			t.Fatalf("failed to get IPs for client %s: %s", client.Hostname(), err) | ||||
| 		} | ||||
| 		clientIPs[client] = ips | ||||
| 	} | ||||
| @ -130,37 +109,32 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) { | ||||
| 	for _, client := range allClients { | ||||
| 		err := client.Logout() | ||||
| 		if err != nil { | ||||
| 			t.Errorf("failed to logout client %s: %s", client.Hostname(), err) | ||||
| 			t.Fatalf("failed to logout client %s: %s", client.Hostname(), err) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	scenario.WaitForTailscaleLogout() | ||||
| 	err = scenario.WaitForTailscaleLogout() | ||||
| 	assertNoErrLogout(t, err) | ||||
| 
 | ||||
| 	t.Logf("all clients logged out") | ||||
| 
 | ||||
| 	headscale, err := scenario.Headscale() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get headscale server: %s", err) | ||||
| 	} | ||||
| 	assertNoErrGetHeadscale(t, err) | ||||
| 
 | ||||
| 	for userName := range spec { | ||||
| 		err = scenario.runTailscaleUp(userName, headscale.GetEndpoint()) | ||||
| 		if err != nil { | ||||
| 			t.Errorf("failed to run tailscale up: %s", err) | ||||
| 			t.Fatalf("failed to run tailscale up: %s", err) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	t.Logf("all clients logged in again") | ||||
| 
 | ||||
| 	allClients, err = scenario.ListTailscaleClients() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClients(t, err) | ||||
| 
 | ||||
| 	allIps, err = scenario.ListTailscaleClientsIPs() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClientIPs(t, err) | ||||
| 
 | ||||
| 	allAddrs = lo.Map(allIps, func(x netip.Addr, index int) string { | ||||
| 		return x.String() | ||||
| @ -172,12 +146,12 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) { | ||||
| 	for _, client := range allClients { | ||||
| 		ips, err := client.IPs() | ||||
| 		if err != nil { | ||||
| 			t.Errorf("failed to get IPs for client %s: %s", client.Hostname(), err) | ||||
| 			t.Fatalf("failed to get IPs for client %s: %s", client.Hostname(), err) | ||||
| 		} | ||||
| 
 | ||||
| 		// let's check if the IPs are the same
 | ||||
| 		if len(ips) != len(clientIPs[client]) { | ||||
| 			t.Errorf("IPs changed for client %s", client.Hostname()) | ||||
| 			t.Fatalf("IPs changed for client %s", client.Hostname()) | ||||
| 		} | ||||
| 
 | ||||
| 		for _, ip := range ips { | ||||
| @ -191,7 +165,7 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) { | ||||
| 			} | ||||
| 
 | ||||
| 			if !found { | ||||
| 				t.Errorf( | ||||
| 				t.Fatalf( | ||||
| 					"IPs changed for client %s. Used to be %v now %v", | ||||
| 					client.Hostname(), | ||||
| 					clientIPs[client], | ||||
| @ -202,11 +176,6 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) { | ||||
| 	} | ||||
| 
 | ||||
| 	t.Logf("all clients' IPs are the same") | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to tear down scenario: %s", err) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (s *AuthWebFlowScenario) CreateHeadscaleEnv( | ||||
| @ -218,7 +187,7 @@ func (s *AuthWebFlowScenario) CreateHeadscaleEnv( | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	err = headscale.WaitForReady() | ||||
| 	err = headscale.WaitForRunning() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| @ -250,36 +219,39 @@ func (s *AuthWebFlowScenario) runTailscaleUp( | ||||
| 	log.Printf("running tailscale up for user %s", userStr) | ||||
| 	if user, ok := s.users[userStr]; ok { | ||||
| 		for _, client := range user.Clients { | ||||
| 			user.joinWaitGroup.Add(1) | ||||
| 
 | ||||
| 			go func(c TailscaleClient) { | ||||
| 				defer user.joinWaitGroup.Done() | ||||
| 
 | ||||
| 				// TODO(juanfont): error handle this
 | ||||
| 				loginURL, err := c.UpWithLoginURL(loginServer) | ||||
| 			c := client | ||||
| 			user.joinWaitGroup.Go(func() error { | ||||
| 				loginURL, err := c.LoginWithURL(loginServer) | ||||
| 				if err != nil { | ||||
| 					log.Printf("failed to run tailscale up: %s", err) | ||||
| 					log.Printf("failed to run tailscale up (%s): %s", c.Hostname(), err) | ||||
| 
 | ||||
| 					return err | ||||
| 				} | ||||
| 
 | ||||
| 				err = s.runHeadscaleRegister(userStr, loginURL) | ||||
| 				if err != nil { | ||||
| 					log.Printf("failed to register client: %s", err) | ||||
| 				} | ||||
| 			}(client) | ||||
| 					log.Printf("failed to register client (%s): %s", c.Hostname(), err) | ||||
| 
 | ||||
| 			err := client.WaitForReady() | ||||
| 					return err | ||||
| 				} | ||||
| 
 | ||||
| 				return nil | ||||
| 			}) | ||||
| 
 | ||||
| 			err := client.WaitForRunning() | ||||
| 			if err != nil { | ||||
| 				log.Printf("error waiting for client %s to be ready: %s", client.Hostname(), err) | ||||
| 			} | ||||
| 		} | ||||
| 		user.joinWaitGroup.Wait() | ||||
| 
 | ||||
| 		if err := user.joinWaitGroup.Wait(); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 
 | ||||
| 		for _, client := range user.Clients { | ||||
| 			err := client.WaitForReady() | ||||
| 			err := client.WaitForRunning() | ||||
| 			if err != nil { | ||||
| 				log.Printf("client %s was not ready: %s", client.Hostname(), err) | ||||
| 
 | ||||
| 				return fmt.Errorf("failed to up tailscale node: %w", err) | ||||
| 				return fmt.Errorf("%s failed to up tailscale node: %w", client.Hostname(), err) | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
|  | ||||
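The conversion in runTailscaleUp above, from a sync.WaitGroup with fire-and-forget goroutines to an errgroup.Group, is the heart of this change: each login goroutine now returns its error, and joinWaitGroup.Wait() hands the first failure back to the caller instead of leaving it buried in a log line. A minimal, self-contained sketch of the pattern, with an illustrative client type standing in for the suite's TailscaleClient:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sync/errgroup"
)

type client struct{ name string }

func (c client) login() error {
	if c.name == "bad" {
		return fmt.Errorf("%s: login failed", c.name)
	}

	return nil
}

func main() {
	clients := []client{{"good"}, {"bad"}}

	// The zero value of errgroup.Group is ready to use; there is no
	// Add/Done bookkeeping to forget.
	var g errgroup.Group
	for _, c := range clients {
		c := c // give each closure its own copy (pre-Go 1.22 loop semantics)
		g.Go(func() error {
			return c.login()
		})
	}

	// Wait blocks until every goroutine finishes and returns the
	// first non-nil error, so setup failures can abort the caller.
	if err := g.Wait(); err != nil {
		log.Fatalf("join failed: %s", err)
	}
}

The c := client reassignment in the diff serves the same purpose as the capture above: without it, every goroutine would close over the single loop variable.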
| @ -33,7 +33,8 @@ func TestUserCommand(t *testing.T) { | ||||
| 	t.Parallel() | ||||
| 
 | ||||
| 	scenario, err := NewScenario() | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		"user1": 0, | ||||
| @ -41,10 +42,10 @@ func TestUserCommand(t *testing.T) { | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins")) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	headscale, err := scenario.Headscale() | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	var listUsers []v1.User | ||||
| 	err = executeAndUnmarshal(headscale, | ||||
| @ -57,7 +58,7 @@ func TestUserCommand(t *testing.T) { | ||||
| 		}, | ||||
| 		&listUsers, | ||||
| 	) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	result := []string{listUsers[0].Name, listUsers[1].Name} | ||||
| 	sort.Strings(result) | ||||
| @ -79,7 +80,7 @@ func TestUserCommand(t *testing.T) { | ||||
| 			"newname", | ||||
| 		}, | ||||
| 	) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	var listAfterRenameUsers []v1.User | ||||
| 	err = executeAndUnmarshal(headscale, | ||||
| @ -92,7 +93,7 @@ func TestUserCommand(t *testing.T) { | ||||
| 		}, | ||||
| 		&listAfterRenameUsers, | ||||
| 	) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	result = []string{listAfterRenameUsers[0].Name, listAfterRenameUsers[1].Name} | ||||
| 	sort.Strings(result) | ||||
| @ -102,9 +103,6 @@ func TestUserCommand(t *testing.T) { | ||||
| 		[]string{"newname", "user1"}, | ||||
| 		result, | ||||
| 	) | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	assert.NoError(t, err) | ||||
| } | ||||
| 
 | ||||
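The executeAndUnmarshal helper these CLI tests revolve around is not part of this diff. From its call sites, it runs a headscale CLI command on the control server and decodes the JSON output into the supplied value; a plausible sketch under that assumption (the executor interface is distilled from usage, and the real helper may differ):

package integration

import (
	"encoding/json"
	"fmt"
)

// executor is the minimal surface needed here; in the suite, the
// ControlServer interface provides Execute.
type executor interface {
	Execute(command []string) (string, error)
}

// executeAndUnmarshal runs a CLI command and decodes its JSON
// output into result, which must be a pointer.
func executeAndUnmarshal(hs executor, command []string, result interface{}) error {
	str, err := hs.Execute(command)
	if err != nil {
		return fmt.Errorf("failed to execute command %v: %w", command, err)
	}

	if err := json.Unmarshal([]byte(str), result); err != nil {
		return fmt.Errorf("failed to unmarshal output %q: %w", str, err)
	}

	return nil
}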
| func TestPreAuthKeyCommand(t *testing.T) { | ||||
| @ -115,20 +113,21 @@ func TestPreAuthKeyCommand(t *testing.T) { | ||||
| 	count := 3 | ||||
| 
 | ||||
| 	scenario, err := NewScenario() | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		user: 0, | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clipak")) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	headscale, err := scenario.Headscale() | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	keys := make([]*v1.PreAuthKey, count) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	for index := 0; index < count; index++ { | ||||
| 		var preAuthKey v1.PreAuthKey | ||||
| @ -150,7 +149,7 @@ func TestPreAuthKeyCommand(t *testing.T) { | ||||
| 			}, | ||||
| 			&preAuthKey, | ||||
| 		) | ||||
| 		assert.NoError(t, err) | ||||
| 		assertNoErr(t, err) | ||||
| 
 | ||||
| 		keys[index] = &preAuthKey | ||||
| 	} | ||||
| @ -171,7 +170,7 @@ func TestPreAuthKeyCommand(t *testing.T) { | ||||
| 		}, | ||||
| 		&listedPreAuthKeys, | ||||
| 	) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	// There is one key created by "scenario.CreateHeadscaleEnv"
 | ||||
| 	assert.Len(t, listedPreAuthKeys, 4) | ||||
| @ -222,7 +221,7 @@ func TestPreAuthKeyCommand(t *testing.T) { | ||||
| 			listedPreAuthKeys[1].Key, | ||||
| 		}, | ||||
| 	) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	var listedPreAuthKeysAfterExpire []v1.PreAuthKey | ||||
| 	err = executeAndUnmarshal( | ||||
| @ -238,14 +237,11 @@ func TestPreAuthKeyCommand(t *testing.T) { | ||||
| 		}, | ||||
| 		&listedPreAuthKeysAfterExpire, | ||||
| 	) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	assert.True(t, listedPreAuthKeysAfterExpire[1].Expiration.AsTime().Before(time.Now())) | ||||
| 	assert.True(t, listedPreAuthKeysAfterExpire[2].Expiration.AsTime().After(time.Now())) | ||||
| 	assert.True(t, listedPreAuthKeysAfterExpire[3].Expiration.AsTime().After(time.Now())) | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	assert.NoError(t, err) | ||||
| } | ||||
| 
 | ||||
| func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) { | ||||
| @ -255,17 +251,18 @@ func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) { | ||||
| 	user := "pre-auth-key-without-exp-user" | ||||
| 
 | ||||
| 	scenario, err := NewScenario() | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		user: 0, | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clipaknaexp")) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	headscale, err := scenario.Headscale() | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	var preAuthKey v1.PreAuthKey | ||||
| 	err = executeAndUnmarshal( | ||||
| @ -282,7 +279,7 @@ func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) { | ||||
| 		}, | ||||
| 		&preAuthKey, | ||||
| 	) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	var listedPreAuthKeys []v1.PreAuthKey | ||||
| 	err = executeAndUnmarshal( | ||||
| @ -298,7 +295,7 @@ func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) { | ||||
| 		}, | ||||
| 		&listedPreAuthKeys, | ||||
| 	) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	// There is one key created by "scenario.CreateHeadscaleEnv"
 | ||||
| 	assert.Len(t, listedPreAuthKeys, 2) | ||||
| @ -308,9 +305,6 @@ func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) { | ||||
| 		t, | ||||
| 		listedPreAuthKeys[1].Expiration.AsTime().Before(time.Now().Add(time.Minute*70)), | ||||
| 	) | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	assert.NoError(t, err) | ||||
| } | ||||
| 
 | ||||
| func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) { | ||||
| @ -320,17 +314,18 @@ func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) { | ||||
| 	user := "pre-auth-key-reus-ephm-user" | ||||
| 
 | ||||
| 	scenario, err := NewScenario() | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		user: 0, | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clipakresueeph")) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	headscale, err := scenario.Headscale() | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	var preAuthReusableKey v1.PreAuthKey | ||||
| 	err = executeAndUnmarshal( | ||||
| @ -347,7 +342,7 @@ func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) { | ||||
| 		}, | ||||
| 		&preAuthReusableKey, | ||||
| 	) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	var preAuthEphemeralKey v1.PreAuthKey | ||||
| 	err = executeAndUnmarshal( | ||||
| @ -364,7 +359,7 @@ func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) { | ||||
| 		}, | ||||
| 		&preAuthEphemeralKey, | ||||
| 	) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	assert.True(t, preAuthEphemeralKey.GetEphemeral()) | ||||
| 	assert.False(t, preAuthEphemeralKey.GetReusable()) | ||||
| @ -383,13 +378,10 @@ func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) { | ||||
| 		}, | ||||
| 		&listedPreAuthKeys, | ||||
| 	) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	// There is one key created by "scenario.CreateHeadscaleEnv"
 | ||||
| 	assert.Len(t, listedPreAuthKeys, 3) | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	assert.NoError(t, err) | ||||
| } | ||||
| 
 | ||||
| func TestEnablingRoutes(t *testing.T) { | ||||
| @ -399,27 +391,24 @@ func TestEnablingRoutes(t *testing.T) { | ||||
| 	user := "enable-routing" | ||||
| 
 | ||||
| 	scenario, err := NewScenario() | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErrf(t, "failed to create scenario: %s", err) | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		user: 3, | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clienableroute")) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErrHeadscaleEnv(t, err) | ||||
| 
 | ||||
| 	allClients, err := scenario.ListTailscaleClients() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClients(t, err) | ||||
| 
 | ||||
| 	err = scenario.WaitForTailscaleSync() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed wait for tailscale clients to be in sync: %s", err) | ||||
| 	} | ||||
| 	assertNoErrSync(t, err) | ||||
| 
 | ||||
| 	headscale, err := scenario.Headscale() | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErrGetHeadscale(t, err) | ||||
| 
 | ||||
| 	// advertise routes using the up command
 | ||||
| 	for i, client := range allClients { | ||||
| @ -432,13 +421,11 @@ func TestEnablingRoutes(t *testing.T) { | ||||
| 			"-login-server", headscale.GetEndpoint(), | ||||
| 			"--hostname", hostname, | ||||
| 		}) | ||||
| 		assert.NoError(t, err) | ||||
| 		assertNoErrf(t, "failed to advertise route: %s", err) | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.WaitForTailscaleSync() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed wait for tailscale clients to be in sync: %s", err) | ||||
| 	} | ||||
| 	assertNoErrSync(t, err) | ||||
| 
 | ||||
| 	var routes []*v1.Route | ||||
| 	err = executeAndUnmarshal( | ||||
| @ -453,7 +440,7 @@ func TestEnablingRoutes(t *testing.T) { | ||||
| 		&routes, | ||||
| 	) | ||||
| 
 | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 	assert.Len(t, routes, 3) | ||||
| 
 | ||||
| 	for _, route := range routes { | ||||
| @ -471,7 +458,7 @@ func TestEnablingRoutes(t *testing.T) { | ||||
| 				"--route", | ||||
| 				strconv.Itoa(int(route.Id)), | ||||
| 			}) | ||||
| 		assert.NoError(t, err) | ||||
| 		assertNoErr(t, err) | ||||
| 	} | ||||
| 
 | ||||
| 	var enablingRoutes []*v1.Route | ||||
| @ -486,7 +473,7 @@ func TestEnablingRoutes(t *testing.T) { | ||||
| 		}, | ||||
| 		&enablingRoutes, | ||||
| 	) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	for _, route := range enablingRoutes { | ||||
| 		assert.Equal(t, route.Advertised, true) | ||||
| @ -504,7 +491,7 @@ func TestEnablingRoutes(t *testing.T) { | ||||
| 			"--route", | ||||
| 			strconv.Itoa(int(routeIDToBeDisabled)), | ||||
| 		}) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	var disablingRoutes []*v1.Route | ||||
| 	err = executeAndUnmarshal( | ||||
| @ -518,7 +505,7 @@ func TestEnablingRoutes(t *testing.T) { | ||||
| 		}, | ||||
| 		&disablingRoutes, | ||||
| 	) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	for _, route := range disablingRoutes { | ||||
| 		assert.Equal(t, true, route.Advertised) | ||||
| @ -540,7 +527,8 @@ func TestApiKeyCommand(t *testing.T) { | ||||
| 	count := 5 | ||||
| 
 | ||||
| 	scenario, err := NewScenario() | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		"user1": 0, | ||||
| @ -548,10 +536,10 @@ func TestApiKeyCommand(t *testing.T) { | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins")) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	headscale, err := scenario.Headscale() | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	keys := make([]string, count) | ||||
| 
 | ||||
| @ -675,9 +663,6 @@ func TestApiKeyCommand(t *testing.T) { | ||||
| 			) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	assert.NoError(t, err) | ||||
| } | ||||
| 
 | ||||
| func TestNodeTagCommand(t *testing.T) { | ||||
| @ -685,17 +670,18 @@ func TestNodeTagCommand(t *testing.T) { | ||||
| 	t.Parallel() | ||||
| 
 | ||||
| 	scenario, err := NewScenario() | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		"user1": 0, | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins")) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	headscale, err := scenario.Headscale() | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	machineKeys := []string{ | ||||
| 		"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe", | ||||
| @ -810,9 +796,6 @@ func TestNodeTagCommand(t *testing.T) { | ||||
| 		found, | ||||
| 		"should find a machine with the tag 'tag:test' in the list of machines", | ||||
| 	) | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	assert.NoError(t, err) | ||||
| } | ||||
| 
 | ||||
| func TestNodeCommand(t *testing.T) { | ||||
| @ -820,7 +803,8 @@ func TestNodeCommand(t *testing.T) { | ||||
| 	t.Parallel() | ||||
| 
 | ||||
| 	scenario, err := NewScenario() | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		"machine-user": 0, | ||||
| @ -828,10 +812,10 @@ func TestNodeCommand(t *testing.T) { | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins")) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	headscale, err := scenario.Headscale() | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	// Randomly generated machine keys
 | ||||
| 	machineKeys := []string{ | ||||
| @ -1053,9 +1037,6 @@ func TestNodeCommand(t *testing.T) { | ||||
| 	assert.Nil(t, err) | ||||
| 
 | ||||
| 	assert.Len(t, listOnlyMachineUserAfterDelete, 4) | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	assert.NoError(t, err) | ||||
| } | ||||
| 
 | ||||
| func TestNodeExpireCommand(t *testing.T) { | ||||
| @ -1063,17 +1044,18 @@ func TestNodeExpireCommand(t *testing.T) { | ||||
| 	t.Parallel() | ||||
| 
 | ||||
| 	scenario, err := NewScenario() | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		"machine-expire-user": 0, | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins")) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	headscale, err := scenario.Headscale() | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	// Randomly generated machine keys
 | ||||
| 	machineKeys := []string{ | ||||
| @ -1182,9 +1164,6 @@ func TestNodeExpireCommand(t *testing.T) { | ||||
| 	assert.True(t, listAllAfterExpiry[2].Expiry.AsTime().Before(time.Now())) | ||||
| 	assert.True(t, listAllAfterExpiry[3].Expiry.AsTime().IsZero()) | ||||
| 	assert.True(t, listAllAfterExpiry[4].Expiry.AsTime().IsZero()) | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	assert.NoError(t, err) | ||||
| } | ||||
| 
 | ||||
| func TestNodeRenameCommand(t *testing.T) { | ||||
| @ -1192,17 +1171,18 @@ func TestNodeRenameCommand(t *testing.T) { | ||||
| 	t.Parallel() | ||||
| 
 | ||||
| 	scenario, err := NewScenario() | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		"machine-rename-command": 0, | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins")) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	headscale, err := scenario.Headscale() | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	// Randomly generated machine keys
 | ||||
| 	machineKeys := []string{ | ||||
| @ -1349,9 +1329,6 @@ func TestNodeRenameCommand(t *testing.T) { | ||||
| 	assert.Equal(t, "newmachine-3", listAllAfterRenameAttempt[2].GetGivenName()) | ||||
| 	assert.Contains(t, listAllAfterRenameAttempt[3].GetGivenName(), "machine-4") | ||||
| 	assert.Contains(t, listAllAfterRenameAttempt[4].GetGivenName(), "machine-5") | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	assert.NoError(t, err) | ||||
| } | ||||
| 
 | ||||
| func TestNodeMoveCommand(t *testing.T) { | ||||
| @ -1359,7 +1336,8 @@ func TestNodeMoveCommand(t *testing.T) { | ||||
| 	t.Parallel() | ||||
| 
 | ||||
| 	scenario, err := NewScenario() | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		"old-user": 0, | ||||
| @ -1367,10 +1345,10 @@ func TestNodeMoveCommand(t *testing.T) { | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins")) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	headscale, err := scenario.Headscale() | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	// Randomly generated machine key
 | ||||
| 	machineKey := "nodekey:688411b767663479632d44140f08a9fde87383adc7cdeb518f62ce28a17ef0aa" | ||||
| @ -1514,7 +1492,4 @@ func TestNodeMoveCommand(t *testing.T) { | ||||
| 	assert.Nil(t, err) | ||||
| 
 | ||||
| 	assert.Equal(t, machine.User.Name, "old-user") | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	assert.NoError(t, err) | ||||
| } | ||||
|  | ||||
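Every assert.NoError in this file gives way to assertNoErr and its purpose-named siblings. The helpers live outside this hunk, but the property that matters is visible from usage: they abort the test immediately on error rather than merely recording a failure. A hedged sketch of what they presumably look like (bodies and messages are assumptions):

package integration

import "testing"

// assertNoErrf fails the test immediately, formatting err into msg.
// t.Helper() makes the report point at the caller, not this file.
func assertNoErrf(t *testing.T, msg string, err error) {
	t.Helper()
	if err != nil {
		t.Fatalf(msg, err)
	}
}

func assertNoErr(t *testing.T, err error) {
	t.Helper()
	assertNoErrf(t, "unexpected error: %s", err)
}

// Purpose-named wrappers keep call sites short and failure
// messages consistent across tests.
func assertNoErrHeadscaleEnv(t *testing.T, err error) {
	t.Helper()
	assertNoErrf(t, "failed to create headscale environment: %s", err)
}

func assertNoErrSync(t *testing.T, err error) {
	t.Helper()
	assertNoErrf(t, "failed to sync tailscale clients: %s", err)
}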
| @ -13,7 +13,7 @@ type ControlServer interface { | ||||
| 	ConnectToNetwork(network *dockertest.Network) error | ||||
| 	GetHealthEndpoint() string | ||||
| 	GetEndpoint() string | ||||
| 	WaitForReady() error | ||||
| 	WaitForRunning() error | ||||
| 	CreateUser(user string) error | ||||
| 	CreateAuthKey(user string, reusable bool, ephemeral bool) (*v1.PreAuthKey, error) | ||||
| 	ListMachinesInUser(user string) ([]*v1.Machine, error) | ||||
|  | ||||
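Renaming WaitForReady to WaitForRunning on ControlServer forces every implementation and caller to move in lockstep, since the build breaks otherwise. A compile-time assertion makes that contract explicit; an illustrative sketch with a trimmed interface and a fake implementation:

package integration

// controlServer is trimmed to the one method relevant here.
type controlServer interface {
	WaitForRunning() error
}

type fakeControl struct{}

func (f *fakeControl) WaitForRunning() error { return nil }

// Compile-time proof that *fakeControl satisfies the interface.
// If the method is renamed again, this line fails to build.
var _ controlServer = (*fakeControl)(nil)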
| @ -24,14 +24,13 @@ func TestDERPServerScenario(t *testing.T) { | ||||
| 	// t.Parallel()
 | ||||
| 
 | ||||
| 	baseScenario, err := NewScenario() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create scenario: %s", err) | ||||
| 	} | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	scenario := EmbeddedDERPServerScenario{ | ||||
| 		Scenario:     baseScenario, | ||||
| 		tsicNetworks: map[string]*dockertest.Network{}, | ||||
| 	} | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		"user1": len(TailscaleVersions), | ||||
| @ -53,39 +52,23 @@ func TestDERPServerScenario(t *testing.T) { | ||||
| 		hsic.WithTLS(), | ||||
| 		hsic.WithHostnameAsServerURL(), | ||||
| 	) | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create headscale environment: %s", err) | ||||
| 	} | ||||
| 	assertNoErrHeadscaleEnv(t, err) | ||||
| 
 | ||||
| 	allClients, err := scenario.ListTailscaleClients() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClients(t, err) | ||||
| 
 | ||||
| 	allIps, err := scenario.ListTailscaleClientsIPs() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClientIPs(t, err) | ||||
| 
 | ||||
| 	err = scenario.WaitForTailscaleSync() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed wait for tailscale clients to be in sync: %s", err) | ||||
| 	} | ||||
| 	assertNoErrSync(t, err) | ||||
| 
 | ||||
| 	allHostnames, err := scenario.ListTailscaleClientsFQDNs() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get FQDNs: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListFQDN(t, err) | ||||
| 
 | ||||
| 	success := pingDerpAllHelper(t, allClients, allHostnames) | ||||
| 
 | ||||
| 	t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps)) | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to tear down scenario: %s", err) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (s *EmbeddedDERPServerScenario) CreateHeadscaleEnv( | ||||
| @ -105,7 +88,7 @@ func (s *EmbeddedDERPServerScenario) CreateHeadscaleEnv( | ||||
| 
 | ||||
| 	headscaleURL.Host = fmt.Sprintf("%s:%s", hsServer.GetHostname(), headscaleURL.Port()) | ||||
| 
 | ||||
| 	err = hsServer.WaitForReady() | ||||
| 	err = hsServer.WaitForRunning() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| @ -186,16 +169,11 @@ func (s *EmbeddedDERPServerScenario) CreateTailscaleIsolatedNodesInUser( | ||||
| 
 | ||||
| 			cert := hsServer.GetCert() | ||||
| 
 | ||||
| 			user.createWaitGroup.Add(1) | ||||
| 
 | ||||
| 			opts = append(opts, | ||||
| 				tsic.WithHeadscaleTLS(cert), | ||||
| 			) | ||||
| 
 | ||||
| 			go func() { | ||||
| 				defer user.createWaitGroup.Done() | ||||
| 
 | ||||
| 				// TODO(kradalby): error handle this
 | ||||
| 			user.createWaitGroup.Go(func() error { | ||||
| 				tsClient, err := tsic.New( | ||||
| 					s.pool, | ||||
| 					version, | ||||
| @ -203,34 +181,45 @@ func (s *EmbeddedDERPServerScenario) CreateTailscaleIsolatedNodesInUser( | ||||
| 					opts..., | ||||
| 				) | ||||
| 				if err != nil { | ||||
| 					// return fmt.Errorf("failed to add tailscale node: %w", err)
 | ||||
| 					log.Printf("failed to create tailscale node: %s", err) | ||||
| 					return fmt.Errorf( | ||||
| 						"failed to create tailscale (%s) node: %w", | ||||
| 						tsClient.Hostname(), | ||||
| 						err, | ||||
| 					) | ||||
| 				} | ||||
| 
 | ||||
| 				err = tsClient.WaitForReady() | ||||
| 				err = tsClient.WaitForNeedsLogin() | ||||
| 				if err != nil { | ||||
| 					// return fmt.Errorf("failed to add tailscale node: %w", err)
 | ||||
| 					log.Printf("failed to wait for tailscaled: %s", err) | ||||
| 					return fmt.Errorf( | ||||
| 						"failed to wait for tailscaled (%s) to need login: %w", | ||||
| 						tsClient.Hostname(), | ||||
| 						err, | ||||
| 					) | ||||
| 				} | ||||
| 
 | ||||
| 				user.Clients[tsClient.Hostname()] = tsClient | ||||
| 			}() | ||||
| 
 | ||||
| 				return nil | ||||
| 			}) | ||||
| 		} | ||||
| 
 | ||||
| 		if err := user.createWaitGroup.Wait(); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		user.createWaitGroup.Wait() | ||||
| 
 | ||||
| 		return nil | ||||
| 	} | ||||
| 
 | ||||
| 	return fmt.Errorf("failed to add tailscale node: %w", errNoUserAvailable) | ||||
| 	return fmt.Errorf("failed to add tailscale nodes: %w", errNoUserAvailable) | ||||
| } | ||||
| 
 | ||||
| func (s *EmbeddedDERPServerScenario) Shutdown() error { | ||||
| func (s *EmbeddedDERPServerScenario) Shutdown() { | ||||
| 	for _, network := range s.tsicNetworks { | ||||
| 		err := s.pool.RemoveNetwork(network) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 			log.Printf("failed to remove DERP network %s", network.Network.Name) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return s.Scenario.Shutdown() | ||||
| 	s.Scenario.Shutdown() | ||||
| } | ||||
|  | ||||
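Shutdown going from func() error to a plain func() is more than cosmetics. Teardown now logs and keeps going, so one stuck network cannot prevent the remaining containers from being removed; and since defer scenario.Shutdown() discards any return value, the old signature had nothing to offer at that call site anyway. A minimal sketch of the best-effort teardown pattern, with an illustrative resource type:

package main

import (
	"errors"
	"log"
)

type resource struct {
	name string
	fail bool
}

func (r resource) Close() error {
	if r.fail {
		return errors.New("still in use")
	}

	return nil
}

// shutdown releases every resource, logging failures instead of
// returning at the first one, so a single broken container cannot
// leak everything behind it.
func shutdown(resources []resource) {
	for _, r := range resources {
		if err := r.Close(); err != nil {
			log.Printf("failed to close %s: %s", r.name, err)
		}
	}
}

func main() {
	shutdown([]resource{{name: "network", fail: true}, {name: "container"}})
}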
| @ -21,9 +21,8 @@ func TestPingAllByIP(t *testing.T) { | ||||
| 	t.Parallel() | ||||
| 
 | ||||
| 	scenario, err := NewScenario() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create scenario: %s", err) | ||||
| 	} | ||||
| 	assertNoErr(t, err) | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		"user1": len(TailscaleVersions), | ||||
| @ -31,24 +30,16 @@ func TestPingAllByIP(t *testing.T) { | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("pingallbyip")) | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create headscale environment: %s", err) | ||||
| 	} | ||||
| 	assertNoErrHeadscaleEnv(t, err) | ||||
| 
 | ||||
| 	allClients, err := scenario.ListTailscaleClients() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClients(t, err) | ||||
| 
 | ||||
| 	allIps, err := scenario.ListTailscaleClientsIPs() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClientIPs(t, err) | ||||
| 
 | ||||
| 	err = scenario.WaitForTailscaleSync() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed wait for tailscale clients to be in sync: %s", err) | ||||
| 	} | ||||
| 	assertNoErrSync(t, err) | ||||
| 
 | ||||
| 	allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string { | ||||
| 		return x.String() | ||||
| @ -56,11 +47,6 @@ func TestPingAllByIP(t *testing.T) { | ||||
| 
 | ||||
| 	success := pingAllHelper(t, allClients, allAddrs) | ||||
| 	t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps)) | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to tear down scenario: %s", err) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func TestAuthKeyLogoutAndRelogin(t *testing.T) { | ||||
| @ -68,9 +54,8 @@ func TestAuthKeyLogoutAndRelogin(t *testing.T) { | ||||
| 	t.Parallel() | ||||
| 
 | ||||
| 	scenario, err := NewScenario() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create scenario: %s", err) | ||||
| 	} | ||||
| 	assertNoErr(t, err) | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		"user1": len(TailscaleVersions), | ||||
| @ -78,25 +63,19 @@ func TestAuthKeyLogoutAndRelogin(t *testing.T) { | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("pingallbyip")) | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create headscale environment: %s", err) | ||||
| 	} | ||||
| 	assertNoErrHeadscaleEnv(t, err) | ||||
| 
 | ||||
| 	allClients, err := scenario.ListTailscaleClients() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClients(t, err) | ||||
| 
 | ||||
| 	err = scenario.WaitForTailscaleSync() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed wait for tailscale clients to be in sync: %s", err) | ||||
| 	} | ||||
| 	assertNoErrSync(t, err) | ||||
| 
 | ||||
| 	clientIPs := make(map[TailscaleClient][]netip.Addr) | ||||
| 	for _, client := range allClients { | ||||
| 		ips, err := client.IPs() | ||||
| 		if err != nil { | ||||
| 			t.Errorf("failed to get IPs for client %s: %s", client.Hostname(), err) | ||||
| 			t.Fatalf("failed to get IPs for client %s: %s", client.Hostname(), err) | ||||
| 		} | ||||
| 		clientIPs[client] = ips | ||||
| 	} | ||||
| @ -104,45 +83,38 @@ func TestAuthKeyLogoutAndRelogin(t *testing.T) { | ||||
| 	for _, client := range allClients { | ||||
| 		err := client.Logout() | ||||
| 		if err != nil { | ||||
| 			t.Errorf("failed to logout client %s: %s", client.Hostname(), err) | ||||
| 			t.Fatalf("failed to logout client %s: %s", client.Hostname(), err) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	scenario.WaitForTailscaleLogout() | ||||
| 	err = scenario.WaitForTailscaleLogout() | ||||
| 	assertNoErrLogout(t, err) | ||||
| 
 | ||||
| 	t.Logf("all clients logged out") | ||||
| 
 | ||||
| 	headscale, err := scenario.Headscale() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get headscale server: %s", err) | ||||
| 	} | ||||
| 	assertNoErrGetHeadscale(t, err) | ||||
| 
 | ||||
| 	for userName := range spec { | ||||
| 		key, err := scenario.CreatePreAuthKey(userName, true, false) | ||||
| 		if err != nil { | ||||
| 			t.Errorf("failed to create pre-auth key for user %s: %s", userName, err) | ||||
| 			t.Fatalf("failed to create pre-auth key for user %s: %s", userName, err) | ||||
| 		} | ||||
| 
 | ||||
| 		err = scenario.RunTailscaleUp(userName, headscale.GetEndpoint(), key.GetKey()) | ||||
| 		if err != nil { | ||||
| 			t.Errorf("failed to run tailscale up for user %s: %s", userName, err) | ||||
| 			t.Fatalf("failed to run tailscale up for user %s: %s", userName, err) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.WaitForTailscaleSync() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed wait for tailscale clients to be in sync: %s", err) | ||||
| 	} | ||||
| 	assertNoErrSync(t, err) | ||||
| 
 | ||||
| 	allClients, err = scenario.ListTailscaleClients() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClients(t, err) | ||||
| 
 | ||||
| 	allIps, err := scenario.ListTailscaleClientsIPs() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClientIPs(t, err) | ||||
| 
 | ||||
| 	allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string { | ||||
| 		return x.String() | ||||
| @ -154,12 +126,12 @@ func TestAuthKeyLogoutAndRelogin(t *testing.T) { | ||||
| 	for _, client := range allClients { | ||||
| 		ips, err := client.IPs() | ||||
| 		if err != nil { | ||||
| 			t.Errorf("failed to get IPs for client %s: %s", client.Hostname(), err) | ||||
| 			t.Fatalf("failed to get IPs for client %s: %s", client.Hostname(), err) | ||||
| 		} | ||||
| 
 | ||||
| 		// let's check if the IPs are the same
 | ||||
| 		if len(ips) != len(clientIPs[client]) { | ||||
| 			t.Errorf("IPs changed for client %s", client.Hostname()) | ||||
| 			t.Fatalf("IPs changed for client %s", client.Hostname()) | ||||
| 		} | ||||
| 
 | ||||
| 		for _, ip := range ips { | ||||
| @ -173,7 +145,7 @@ func TestAuthKeyLogoutAndRelogin(t *testing.T) { | ||||
| 			} | ||||
| 
 | ||||
| 			if !found { | ||||
| 				t.Errorf( | ||||
| 				t.Fatalf( | ||||
| 					"IPs changed for client %s. Used to be %v now %v", | ||||
| 					client.Hostname(), | ||||
| 					clientIPs[client], | ||||
| @ -182,13 +154,6 @@ func TestAuthKeyLogoutAndRelogin(t *testing.T) { | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	t.Logf("all clients' IPs are the same") | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to tear down scenario: %s", err) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func TestEphemeral(t *testing.T) { | ||||
| @ -196,9 +161,8 @@ func TestEphemeral(t *testing.T) { | ||||
| 	t.Parallel() | ||||
| 
 | ||||
| 	scenario, err := NewScenario() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create scenario: %s", err) | ||||
| 	} | ||||
| 	assertNoErr(t, err) | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		"user1": len(TailscaleVersions), | ||||
| @ -206,46 +170,38 @@ func TestEphemeral(t *testing.T) { | ||||
| 	} | ||||
| 
 | ||||
| 	headscale, err := scenario.Headscale(hsic.WithTestName("ephemeral")) | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create headscale environment: %s", err) | ||||
| 	} | ||||
| 	assertNoErrHeadscaleEnv(t, err) | ||||
| 
 | ||||
| 	for userName, clientCount := range spec { | ||||
| 		err = scenario.CreateUser(userName) | ||||
| 		if err != nil { | ||||
| 			t.Errorf("failed to create user %s: %s", userName, err) | ||||
| 			t.Fatalf("failed to create user %s: %s", userName, err) | ||||
| 		} | ||||
| 
 | ||||
| 		err = scenario.CreateTailscaleNodesInUser(userName, "all", clientCount, []tsic.Option{}...) | ||||
| 		if err != nil { | ||||
| 			t.Errorf("failed to create tailscale nodes in user %s: %s", userName, err) | ||||
| 			t.Fatalf("failed to create tailscale nodes in user %s: %s", userName, err) | ||||
| 		} | ||||
| 
 | ||||
| 		key, err := scenario.CreatePreAuthKey(userName, true, true) | ||||
| 		if err != nil { | ||||
| 			t.Errorf("failed to create pre-auth key for user %s: %s", userName, err) | ||||
| 			t.Fatalf("failed to create pre-auth key for user %s: %s", userName, err) | ||||
| 		} | ||||
| 
 | ||||
| 		err = scenario.RunTailscaleUp(userName, headscale.GetEndpoint(), key.GetKey()) | ||||
| 		if err != nil { | ||||
| 			t.Errorf("failed to run tailscale up for user %s: %s", userName, err) | ||||
| 			t.Fatalf("failed to run tailscale up for user %s: %s", userName, err) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.WaitForTailscaleSync() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed wait for tailscale clients to be in sync: %s", err) | ||||
| 	} | ||||
| 	assertNoErrSync(t, err) | ||||
| 
 | ||||
| 	allClients, err := scenario.ListTailscaleClients() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClients(t, err) | ||||
| 
 | ||||
| 	allIps, err := scenario.ListTailscaleClientsIPs() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClientIPs(t, err) | ||||
| 
 | ||||
| 	allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string { | ||||
| 		return x.String() | ||||
| @ -257,11 +213,12 @@ func TestEphemeral(t *testing.T) { | ||||
| 	for _, client := range allClients { | ||||
| 		err := client.Logout() | ||||
| 		if err != nil { | ||||
| 			t.Errorf("failed to logout client %s: %s", client.Hostname(), err) | ||||
| 			t.Fatalf("failed to logout client %s: %s", client.Hostname(), err) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	scenario.WaitForTailscaleLogout() | ||||
| 	err = scenario.WaitForTailscaleLogout() | ||||
| 	assertNoErrLogout(t, err) | ||||
| 
 | ||||
| 	t.Logf("all clients logged out") | ||||
| 
 | ||||
| @ -277,14 +234,9 @@ func TestEphemeral(t *testing.T) { | ||||
| 		} | ||||
| 
 | ||||
| 		if len(machines) != 0 { | ||||
| 			t.Errorf("expected no machines, got %d in user %s", len(machines), userName) | ||||
| 			t.Fatalf("expected no machines, got %d in user %s", len(machines), userName) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to tear down scenario: %s", err) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func TestPingAllByHostname(t *testing.T) { | ||||
| @ -292,9 +244,8 @@ func TestPingAllByHostname(t *testing.T) { | ||||
| 	t.Parallel() | ||||
| 
 | ||||
| 	scenario, err := NewScenario() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create scenario: %s", err) | ||||
| 	} | ||||
| 	assertNoErr(t, err) | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		// Omit 1.16.2 (-1) because it does not have the FQDN field
 | ||||
| @ -303,33 +254,20 @@ func TestPingAllByHostname(t *testing.T) { | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("pingallbyname")) | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create headscale environment: %s", err) | ||||
| 	} | ||||
| 	assertNoErrHeadscaleEnv(t, err) | ||||
| 
 | ||||
| 	allClients, err := scenario.ListTailscaleClients() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClients(t, err) | ||||
| 
 | ||||
| 	err = scenario.WaitForTailscaleSync() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed wait for tailscale clients to be in sync: %s", err) | ||||
| 	} | ||||
| 	assertNoErrSync(t, err) | ||||
| 
 | ||||
| 	allHostnames, err := scenario.ListTailscaleClientsFQDNs() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get FQDNs: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListFQDN(t, err) | ||||
| 
 | ||||
| 	success := pingAllHelper(t, allClients, allHostnames) | ||||
| 
 | ||||
| 	t.Logf("%d successful pings out of %d", success, len(allClients)*len(allClients)) | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to tear down scenario: %s", err) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // If subtests are parallel, then they will start before setup is run.
 | ||||
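The comment above flags a real trap for defer-based teardown: a subtest that calls t.Parallel() pauses and only resumes after the parent function body, including its defers, has finished. A deferred Shutdown in the parent would therefore tear the scenario down before parallel subtests ever run. A contrived illustration (not from the suite) of why the subtests below stay synchronous:

package integration

import "testing"

func TestParallelSubtestPitfall(t *testing.T) {
	resource := "ready"
	// If the subtest called t.Parallel(), t.Run would return
	// immediately, this defer would fire, and the subtest body
	// would later observe "gone".
	defer func() { resource = "gone" }()

	t.Run("uses resource", func(t *testing.T) {
		// Deliberately synchronous: no t.Parallel() here, so the
		// resource is still alive when we use it.
		if resource != "ready" {
			t.Fatalf("resource was %q", resource)
		}
	})
}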
| @ -354,9 +292,8 @@ func TestTaildrop(t *testing.T) { | ||||
| 	} | ||||
| 
 | ||||
| 	scenario, err := NewScenario() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create scenario: %s", err) | ||||
| 	} | ||||
| 	assertNoErr(t, err) | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		// Omit 1.16.2 (-1) because it does not have the FQDN field
 | ||||
| @ -364,31 +301,23 @@ func TestTaildrop(t *testing.T) { | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("taildrop")) | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create headscale environment: %s", err) | ||||
| 	} | ||||
| 	assertNoErrHeadscaleEnv(t, err) | ||||
| 
 | ||||
| 	allClients, err := scenario.ListTailscaleClients() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClients(t, err) | ||||
| 
 | ||||
| 	err = scenario.WaitForTailscaleSync() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed wait for tailscale clients to be in sync: %s", err) | ||||
| 	} | ||||
| 	assertNoErrSync(t, err) | ||||
| 
 | ||||
| 	// This will essentially fetch and cache all the FQDNs
 | ||||
| 	_, err = scenario.ListTailscaleClientsFQDNs() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get FQDNs: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListFQDN(t, err) | ||||
| 
 | ||||
| 	for _, client := range allClients { | ||||
| 		command := []string{"touch", fmt.Sprintf("/tmp/file_from_%s", client.Hostname())} | ||||
| 
 | ||||
| 		if _, _, err := client.Execute(command); err != nil { | ||||
| 			t.Errorf("failed to create taildrop file on %s, err: %s", client.Hostname(), err) | ||||
| 			t.Fatalf("failed to create taildrop file on %s, err: %s", client.Hostname(), err) | ||||
| 		} | ||||
| 
 | ||||
| 		for _, peer := range allClients { | ||||
| @ -417,7 +346,7 @@ func TestTaildrop(t *testing.T) { | ||||
| 					return err | ||||
| 				}) | ||||
| 				if err != nil { | ||||
| 					t.Errorf( | ||||
| 					t.Fatalf( | ||||
| 						"failed to send taildrop file on %s, err: %s", | ||||
| 						client.Hostname(), | ||||
| 						err, | ||||
| @ -434,7 +363,7 @@ func TestTaildrop(t *testing.T) { | ||||
| 			"/tmp/", | ||||
| 		} | ||||
| 		if _, _, err := client.Execute(command); err != nil { | ||||
| 			t.Errorf("failed to get taildrop file on %s, err: %s", client.Hostname(), err) | ||||
| 			t.Fatalf("failed to get taildrop file on %s, err: %s", client.Hostname(), err) | ||||
| 		} | ||||
| 
 | ||||
| 		for _, peer := range allClients { | ||||
| @ -454,13 +383,11 @@ func TestTaildrop(t *testing.T) { | ||||
| 				) | ||||
| 
 | ||||
| 				result, _, err := client.Execute(command) | ||||
| 				if err != nil { | ||||
| 					t.Errorf("failed to execute command to ls taildrop: %s", err) | ||||
| 				} | ||||
| 				assertNoErrf(t, "failed to execute command to ls taildrop: %s", err) | ||||
| 
 | ||||
| 				log.Printf("Result for %s: %s\n", peer.Hostname(), result) | ||||
| 				if fmt.Sprintf("/tmp/file_from_%s\n", peer.Hostname()) != result { | ||||
| 					t.Errorf( | ||||
| 					t.Fatalf( | ||||
| 						"taildrop result is not correct %s, wanted %s", | ||||
| 						result, | ||||
| 						fmt.Sprintf("/tmp/file_from_%s\n", peer.Hostname()), | ||||
| @ -469,11 +396,6 @@ func TestTaildrop(t *testing.T) { | ||||
| 			}) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to tear down scenario: %s", err) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func TestResolveMagicDNS(t *testing.T) { | ||||
| @ -481,9 +403,8 @@ func TestResolveMagicDNS(t *testing.T) { | ||||
| 	t.Parallel() | ||||
| 
 | ||||
| 	scenario, err := NewScenario() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create scenario: %s", err) | ||||
| 	} | ||||
| 	assertNoErr(t, err) | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		// Omit 1.16.2 (-1) because it does not have the FQDN field
 | ||||
| @ -492,30 +413,20 @@ func TestResolveMagicDNS(t *testing.T) { | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("magicdns")) | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create headscale environment: %s", err) | ||||
| 	} | ||||
| 	assertNoErrHeadscaleEnv(t, err) | ||||
| 
 | ||||
| 	allClients, err := scenario.ListTailscaleClients() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClients(t, err) | ||||
| 
 | ||||
| 	err = scenario.WaitForTailscaleSync() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed wait for tailscale clients to be in sync: %s", err) | ||||
| 	} | ||||
| 	assertNoErrSync(t, err) | ||||
| 
 | ||||
| 	// Poor man's cache
 | ||||
| 	_, err = scenario.ListTailscaleClientsFQDNs() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get FQDNs: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListFQDN(t, err) | ||||
| 
 | ||||
| 	_, err = scenario.ListTailscaleClientsIPs() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get IPs: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClientIPs(t, err) | ||||
| 
 | ||||
| 	for _, client := range allClients { | ||||
| 		for _, peer := range allClients { | ||||
| @ -528,7 +439,7 @@ func TestResolveMagicDNS(t *testing.T) { | ||||
| 			} | ||||
| 			result, _, err := client.Execute(command) | ||||
| 			if err != nil { | ||||
| 				t.Errorf( | ||||
| 				t.Fatalf( | ||||
| 					"failed to execute resolve/ip command %s from %s: %s", | ||||
| 					peerFQDN, | ||||
| 					client.Hostname(), | ||||
| @ -538,7 +449,7 @@ func TestResolveMagicDNS(t *testing.T) { | ||||
| 
 | ||||
| 			ips, err := peer.IPs() | ||||
| 			if err != nil { | ||||
| 				t.Errorf( | ||||
| 				t.Fatalf( | ||||
| 					"failed to get ips for %s: %s", | ||||
| 					peer.Hostname(), | ||||
| 					err, | ||||
| @ -547,16 +458,11 @@ func TestResolveMagicDNS(t *testing.T) { | ||||
| 
 | ||||
| 			for _, ip := range ips { | ||||
| 				if !strings.Contains(result, ip.String()) { | ||||
| 					t.Errorf("ip %s is not found in \n%s\n", ip.String(), result) | ||||
| 					t.Fatalf("ip %s is not found in \n%s\n", ip.String(), result) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to tear down scenario: %s", err) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func TestExpireNode(t *testing.T) { | ||||
| @ -564,33 +470,24 @@ func TestExpireNode(t *testing.T) { | ||||
| 	t.Parallel() | ||||
| 
 | ||||
| 	scenario, err := NewScenario() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create scenario: %s", err) | ||||
| 	} | ||||
| 	assertNoErr(t, err) | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		"user1": len(TailscaleVersions), | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("expirenode")) | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create headscale environment: %s", err) | ||||
| 	} | ||||
| 	assertNoErrHeadscaleEnv(t, err) | ||||
| 
 | ||||
| 	allClients, err := scenario.ListTailscaleClients() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClients(t, err) | ||||
| 
 | ||||
| 	allIps, err := scenario.ListTailscaleClientsIPs() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClientIPs(t, err) | ||||
| 
 | ||||
| 	err = scenario.WaitForTailscaleSync() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed wait for tailscale clients to be in sync: %s", err) | ||||
| 	} | ||||
| 	assertNoErrSync(t, err) | ||||
| 
 | ||||
| 	allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string { | ||||
| 		return x.String() | ||||
| @ -601,25 +498,25 @@ func TestExpireNode(t *testing.T) { | ||||
| 
 | ||||
| 	for _, client := range allClients { | ||||
| 		status, err := client.Status() | ||||
| 		assert.NoError(t, err) | ||||
| 		assertNoErr(t, err) | ||||
| 
 | ||||
| 		// Assert that we have the original count - self
 | ||||
| 		assert.Len(t, status.Peers(), len(TailscaleVersions)-1) | ||||
| 	} | ||||
| 
 | ||||
| 	headscale, err := scenario.Headscale() | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	// TODO(kradalby): This is Headscale specific and would not play nicely
 | ||||
| 	// with other implementations of the ControlServer interface
 | ||||
| 	result, err := headscale.Execute([]string{ | ||||
| 		"headscale", "nodes", "expire", "--identifier", "0", "--output", "json", | ||||
| 	}) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	var machine v1.Machine | ||||
| 	err = json.Unmarshal([]byte(result), &machine) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	time.Sleep(30 * time.Second) | ||||
| 
 | ||||
| @ -627,7 +524,7 @@ func TestExpireNode(t *testing.T) { | ||||
| 	// of connected nodes.
 | ||||
| 	for _, client := range allClients { | ||||
| 		status, err := client.Status() | ||||
| 		assert.NoError(t, err) | ||||
| 		assertNoErr(t, err) | ||||
| 
 | ||||
| 		for _, peerKey := range status.Peers() { | ||||
| 			peerStatus := status.Peer[peerKey] | ||||
| @ -642,9 +539,4 @@ func TestExpireNode(t *testing.T) { | ||||
| 			assert.Len(t, status.Peers(), len(TailscaleVersions)-2) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to tear down scenario: %s", err) | ||||
| 	} | ||||
| } | ||||
|  | ||||
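A pattern running through all of these tests is the swap of t.Errorf for t.Fatalf on setup and precondition failures. t.Errorf marks the test failed but keeps executing, which is exactly wrong during setup: the next lines then operate on nil or half-initialized values and panic, drowning the real error. A contrived sketch of the difference, with a hypothetical setup helper:

package integration

import "testing"

type thing struct{ Name string }

// setup stands in for the scenario construction in the tests above.
func setup() (*thing, error) { return &thing{Name: "ok"}, nil }

func TestSetupFailureHandling(t *testing.T) {
	value, err := setup()
	if err != nil {
		// With t.Errorf the test would continue and dereference a
		// nil value below; t.Fatalf stops it at the real cause.
		t.Fatalf("setup failed: %s", err)
	}

	if value.Name == "" {
		t.Errorf("expected a name, got empty string")
	}
}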
| @ -428,9 +428,9 @@ func (t *HeadscaleInContainer) GetHostname() string { | ||||
| 	return t.hostname | ||||
| } | ||||
| 
 | ||||
| // WaitForReady blocks until the Headscale instance is ready to
 | ||||
| // WaitForRunning blocks until the Headscale instance is ready to
 | ||||
| // serve clients.
 | ||||
| func (t *HeadscaleInContainer) WaitForReady() error { | ||||
| func (t *HeadscaleInContainer) WaitForRunning() error { | ||||
| 	url := t.GetHealthEndpoint() | ||||
| 
 | ||||
| 	log.Printf("waiting for headscale to be ready at %s", url) | ||||
|  | ||||
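WaitForRunning on the headscale container polls the health endpoint until the server answers, as the first lines of the hunk above suggest. A generic sketch of that kind of readiness loop, assuming plain HTTP polling (the real method may use dockertest's retry helpers and different timeouts):

package main

import (
	"fmt"
	"net/http"
	"time"
)

// waitForHealthy polls url until it returns 200 OK or the timeout
// elapses.
func waitForHealthy(url string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		resp, err := http.Get(url)
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil
			}
		}
		time.Sleep(time.Second)
	}

	return fmt.Errorf("%s did not become healthy within %s", url, timeout)
}

func main() {
	if err := waitForHealthy("http://localhost:8080/health", 30*time.Second); err != nil {
		fmt.Println(err)
	}
}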
| @ -16,6 +16,7 @@ import ( | ||||
| 	"github.com/juanfont/headscale/integration/tsic" | ||||
| 	"github.com/ory/dockertest/v3" | ||||
| 	"github.com/puzpuzpuz/xsync/v2" | ||||
| 	"golang.org/x/sync/errgroup" | ||||
| ) | ||||
| 
 | ||||
| const ( | ||||
| @ -33,30 +34,33 @@ var ( | ||||
| 	tailscaleVersions2021 = []string{ | ||||
| 		"head", | ||||
| 		"unstable", | ||||
| 		"1.40.0", | ||||
| 		"1.38.4", | ||||
| 		"1.36.2", | ||||
| 		"1.34.2", | ||||
| 		"1.32.3", | ||||
| 		"1.30.2", | ||||
| 		"1.48", | ||||
| 		"1.46", | ||||
| 		"1.44", | ||||
| 		"1.42", | ||||
| 		"1.40", | ||||
| 		"1.38", | ||||
| 		"1.36", | ||||
| 		"1.34", | ||||
| 		"1.32", | ||||
| 		"1.30", | ||||
| 	} | ||||
| 
 | ||||
| 	tailscaleVersions2019 = []string{ | ||||
| 		"1.28.0", | ||||
| 		"1.26.2", | ||||
| 		"1.24.2", | ||||
| 		"1.22.2", | ||||
| 		"1.20.4", | ||||
| 		"1.28", | ||||
| 		"1.26", | ||||
| 		"1.24", | ||||
| 		"1.22", | ||||
| 		"1.20", | ||||
| 		"1.18", | ||||
| 	} | ||||
| 
 | ||||
| 	// tailscaleVersionsUnavailable = []string{
 | ||||
| 	// 	// These versions seem to fail when fetching from apt.
 | ||||
| 	//  "1.18.2",
 | ||||
| 	// 	"1.16.2",
 | ||||
| 	// 	"1.14.6",
 | ||||
| 	// 	"1.12.4",
 | ||||
| 	// 	"1.10.2",
 | ||||
| 	// 	"1.8.7",
 | ||||
| 	// "1.14.6",
 | ||||
| 	// "1.12.4",
 | ||||
| 	// "1.10.2",
 | ||||
| 	// "1.8.7",
 | ||||
| 	// }.
 | ||||
| 
 | ||||
| 	// TailscaleVersions represents a list of Tailscale versions the suite
 | ||||
| @ -79,9 +83,9 @@ var ( | ||||
| type User struct { | ||||
| 	Clients map[string]TailscaleClient | ||||
| 
 | ||||
| 	createWaitGroup sync.WaitGroup | ||||
| 	joinWaitGroup   sync.WaitGroup | ||||
| 	syncWaitGroup   sync.WaitGroup | ||||
| 	createWaitGroup errgroup.Group | ||||
| 	joinWaitGroup   errgroup.Group | ||||
| 	syncWaitGroup   errgroup.Group | ||||
| } | ||||
| 
 | ||||
| // Scenario is a representation of an environment with one ControlServer and
 | ||||
| @ -148,7 +152,7 @@ func NewScenario() (*Scenario, error) { | ||||
| // and networks associated with it.
 | ||||
| // In addition, it will save the logs of the ControlServer to `/tmp/control` in the
 | ||||
| // environment running the tests.
 | ||||
| func (s *Scenario) Shutdown() error { | ||||
| func (s *Scenario) Shutdown() { | ||||
| 	s.controlServers.Range(func(_ string, control ControlServer) bool { | ||||
| 		err := control.Shutdown() | ||||
| 		if err != nil { | ||||
| @ -166,21 +170,19 @@ func (s *Scenario) Shutdown() error { | ||||
| 			log.Printf("removing client %s in user %s", client.Hostname(), userName) | ||||
| 			err := client.Shutdown() | ||||
| 			if err != nil { | ||||
| 				return fmt.Errorf("failed to tear down client: %w", err) | ||||
| 				log.Printf("failed to tear down client: %s", err) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if err := s.pool.RemoveNetwork(s.network); err != nil { | ||||
| 		return fmt.Errorf("failed to remove network: %w", err) | ||||
| 		log.Printf("failed to remove network: %s", err) | ||||
| 	} | ||||
| 
 | ||||
| 	// TODO(kradalby): This seems redundant to the previous call
 | ||||
| 	// if err := s.network.Close(); err != nil {
 | ||||
| 	// 	return fmt.Errorf("failed to tear down network: %w", err)
 | ||||
| 	// }
 | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // Users returns the names of all users associated with the Scenario.
 | ||||
| @ -213,7 +215,7 @@ func (s *Scenario) Headscale(opts ...hsic.Option) (ControlServer, error) { | ||||
| 		return nil, fmt.Errorf("failed to create headscale container: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	err = headscale.WaitForReady() | ||||
| 	err = headscale.WaitForRunning() | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("failed to reach headscale container: %w", err) | ||||
| 	} | ||||
| @ -286,17 +288,12 @@ func (s *Scenario) CreateTailscaleNodesInUser( | ||||
| 			cert := headscale.GetCert() | ||||
| 			hostname := headscale.GetHostname() | ||||
| 
 | ||||
| 			user.createWaitGroup.Add(1) | ||||
| 
 | ||||
| 			opts = append(opts, | ||||
| 				tsic.WithHeadscaleTLS(cert), | ||||
| 				tsic.WithHeadscaleName(hostname), | ||||
| 			) | ||||
| 
 | ||||
| 			go func() { | ||||
| 				defer user.createWaitGroup.Done() | ||||
| 
 | ||||
| 				// TODO(kradalby): error handle this
 | ||||
| 			user.createWaitGroup.Go(func() error { | ||||
| 				tsClient, err := tsic.New( | ||||
| 					s.pool, | ||||
| 					version, | ||||
| @ -304,20 +301,30 @@ func (s *Scenario) CreateTailscaleNodesInUser( | ||||
| 					opts..., | ||||
| 				) | ||||
| 				if err != nil { | ||||
| 					// return fmt.Errorf("failed to add tailscale node: %w", err)
 | ||||
| 					log.Printf("failed to create tailscale node: %s", err) | ||||
| 					return fmt.Errorf( | ||||
| 						"failed to create tailscale node (version %s): %w", | ||||
| 						version, | ||||
| 						err, | ||||
| 					) | ||||
| 				} | ||||
| 
 | ||||
| 				err = tsClient.WaitForReady() | ||||
| 				err = tsClient.WaitForNeedsLogin() | ||||
| 				if err != nil { | ||||
| 					// return fmt.Errorf("failed to add tailscale node: %w", err)
 | ||||
| 					log.Printf("failed to wait for tailscaled: %s", err) | ||||
| 					return fmt.Errorf( | ||||
| 						"failed to wait for tailscaled (%s) to need login: %w", | ||||
| 						tsClient.Hostname(), | ||||
| 						err, | ||||
| 					) | ||||
| 				} | ||||
| 
 | ||||
| 				user.Clients[tsClient.Hostname()] = tsClient | ||||
| 			}() | ||||
| 
 | ||||
| 				return nil | ||||
| 			}) | ||||
| 		} | ||||
| 		if err := user.createWaitGroup.Wait(); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		user.createWaitGroup.Wait() | ||||
| 
 | ||||
| 		return nil | ||||
| 	} | ||||
| @ -332,29 +339,20 @@ func (s *Scenario) RunTailscaleUp( | ||||
| ) error { | ||||
| 	if user, ok := s.users[userStr]; ok { | ||||
| 		for _, client := range user.Clients { | ||||
| 			user.joinWaitGroup.Add(1) | ||||
| 
 | ||||
| 			go func(c TailscaleClient) { | ||||
| 				defer user.joinWaitGroup.Done() | ||||
| 
 | ||||
| 				// TODO(kradalby): error handle this
 | ||||
| 				_ = c.Up(loginServer, authKey) | ||||
| 			}(client) | ||||
| 
 | ||||
| 			err := client.WaitForReady() | ||||
| 			if err != nil { | ||||
| 				log.Printf("error waiting for client %s to be ready: %s", client.Hostname(), err) | ||||
| 			} | ||||
| 			c := client | ||||
| 			user.joinWaitGroup.Go(func() error { | ||||
| 				return c.Login(loginServer, authKey) | ||||
| 			}) | ||||
| 		} | ||||
| 
 | ||||
| 		user.joinWaitGroup.Wait() | ||||
| 		if err := user.joinWaitGroup.Wait(); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 
 | ||||
| 		for _, client := range user.Clients { | ||||
| 			err := client.WaitForReady() | ||||
| 			err := client.WaitForRunning() | ||||
| 			if err != nil { | ||||
| 				log.Printf("client %s was not ready: %s", client.Hostname(), err) | ||||
| 
 | ||||
| 				return fmt.Errorf("failed to up tailscale node: %w", err) | ||||
| 				return fmt.Errorf("%s failed to up tailscale node: %w", client.Hostname(), err) | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
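The `c := client` copy above is load-bearing: in Go versions before 1.22, a range variable is a single variable reused across iterations, so a closure launched inside the loop must capture a per-iteration copy or every goroutine may observe the final element. A sketch of the pattern (loginAll is a hypothetical helper, not part of this commit):

```go
// loginAll logs every client in concurrently and returns the first failure.
func loginAll(clients []TailscaleClient, loginServer, authKey string) error {
	var g errgroup.Group
	for _, client := range clients {
		c := client // pre-Go 1.22: capture a per-iteration copy for the closure
		g.Go(func() error {
			return c.Login(loginServer, authKey)
		})
	}

	// Without the copy, all closures could share the loop variable and
	// end up logging in the same (last) client several times.
	return g.Wait()
}
```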
| @ -381,18 +379,22 @@ func (s *Scenario) CountTailscale() int { | ||||
| func (s *Scenario) WaitForTailscaleSync() error { | ||||
| 	tsCount := s.CountTailscale() | ||||
| 
 | ||||
| 	return s.WaitForTailscaleSyncWithPeerCount(tsCount - 1) | ||||
| } | ||||
| 
 | ||||
| // WaitForTailscaleSyncWithPeerCount blocks execution until every TailscaleClient
 | ||||
| // reports the given number of peers in its netmap.NetworkMap.
 | ||||
| func (s *Scenario) WaitForTailscaleSyncWithPeerCount(peerCount int) error { | ||||
| 	for _, user := range s.users { | ||||
| 		for _, client := range user.Clients { | ||||
| 			user.syncWaitGroup.Add(1) | ||||
| 
 | ||||
| 			go func(c TailscaleClient) { | ||||
| 				defer user.syncWaitGroup.Done() | ||||
| 
 | ||||
| 				// TODO(kradalby): error handle this
 | ||||
| 				_ = c.WaitForPeers(tsCount) | ||||
| 			}(client) | ||||
| 			c := client | ||||
| 			user.syncWaitGroup.Go(func() error { | ||||
| 				return c.WaitForPeers(peerCount) | ||||
| 			}) | ||||
| 		} | ||||
| 		if err := user.syncWaitGroup.Wait(); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		user.syncWaitGroup.Wait() | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| @ -555,18 +557,18 @@ func (s *Scenario) ListTailscaleClientsFQDNs(users ...string) ([]string, error) | ||||
| 
 | ||||
| // WaitForTailscaleLogout blocks execution until all TailscaleClients have
 | ||||
| // logged out of the ControlServer.
 | ||||
| func (s *Scenario) WaitForTailscaleLogout() { | ||||
| func (s *Scenario) WaitForTailscaleLogout() error { | ||||
| 	for _, user := range s.users { | ||||
| 		for _, client := range user.Clients { | ||||
| 			user.syncWaitGroup.Add(1) | ||||
| 
 | ||||
| 			go func(c TailscaleClient) { | ||||
| 				defer user.syncWaitGroup.Done() | ||||
| 
 | ||||
| 				// TODO(kradalby): error handle this
 | ||||
| 				_ = c.WaitForLogout() | ||||
| 			}(client) | ||||
| 			c := client | ||||
| 			user.syncWaitGroup.Go(func() error { | ||||
| 				return c.WaitForLogout() | ||||
| 			}) | ||||
| 		} | ||||
| 		if err := user.syncWaitGroup.Wait(); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		user.syncWaitGroup.Wait() | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| @ -34,44 +34,38 @@ func TestHeadscale(t *testing.T) { | ||||
| 	user := "test-space" | ||||
| 
 | ||||
| 	scenario, err := NewScenario() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create scenario: %s", err) | ||||
| 	} | ||||
| 	assertNoErr(t, err) | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	t.Run("start-headscale", func(t *testing.T) { | ||||
| 		headscale, err := scenario.Headscale() | ||||
| 		if err != nil { | ||||
| 			t.Errorf("failed to create/start headscale: %s", err) | ||||
| 			t.Fatalf("failed to create/start headscale: %s", err) | ||||
| 		} | ||||
| 
 | ||||
| 		err = headscale.WaitForReady() | ||||
| 		err = headscale.WaitForRunning() | ||||
| 		if err != nil { | ||||
| 			t.Errorf("headscale failed to become ready: %s", err) | ||||
| 			t.Fatalf("headscale failed to become ready: %s", err) | ||||
| 		} | ||||
| 	}) | ||||
| 
 | ||||
| 	t.Run("create-user", func(t *testing.T) { | ||||
| 		err := scenario.CreateUser(user) | ||||
| 		if err != nil { | ||||
| 			t.Errorf("failed to create user: %s", err) | ||||
| 			t.Fatalf("failed to create user: %s", err) | ||||
| 		} | ||||
| 
 | ||||
| 		if _, ok := scenario.users[user]; !ok { | ||||
| 			t.Errorf("user is not in scenario") | ||||
| 			t.Fatalf("user is not in scenario") | ||||
| 		} | ||||
| 	}) | ||||
| 
 | ||||
| 	t.Run("create-auth-key", func(t *testing.T) { | ||||
| 		_, err := scenario.CreatePreAuthKey(user, true, false) | ||||
| 		if err != nil { | ||||
| 			t.Errorf("failed to create preauthkey: %s", err) | ||||
| 			t.Fatalf("failed to create preauthkey: %s", err) | ||||
| 		} | ||||
| 	}) | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to tear down scenario: %s", err) | ||||
| 	} | ||||
| } | ||||
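The blanket Errorf-to-Fatalf change in these tests is the point of this commit: t.Errorf marks the test failed but keeps executing, which previously let later steps use nil handles from a failed setup step, while t.Fatalf marks it failed and stops the (sub)test immediately. A condensed illustration (hypothetical, assuming a scenario in scope):

```go
func TestSetupStep(t *testing.T) {
	headscale, err := scenario.Headscale()
	if err != nil {
		// t.Errorf here would let execution continue and dereference the
		// nil headscale handle below; t.Fatalf calls FailNow, which exits
		// this test goroutine immediately.
		t.Fatalf("failed to create/start headscale: %s", err)
	}
	_ = headscale.WaitForRunning()
}
```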
| 
 | ||||
| // If subtests are parallel, then they will start before setup is run.
 | ||||
| @ -85,9 +79,8 @@ func TestCreateTailscale(t *testing.T) { | ||||
| 	user := "only-create-containers" | ||||
| 
 | ||||
| 	scenario, err := NewScenario() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create scenario: %s", err) | ||||
| 	} | ||||
| 	assertNoErr(t, err) | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	scenario.users[user] = &User{ | ||||
| 		Clients: make(map[string]TailscaleClient), | ||||
| @ -96,20 +89,15 @@ func TestCreateTailscale(t *testing.T) { | ||||
| 	t.Run("create-tailscale", func(t *testing.T) { | ||||
| 		err := scenario.CreateTailscaleNodesInUser(user, "all", 3) | ||||
| 		if err != nil { | ||||
| 			t.Errorf("failed to add tailscale nodes: %s", err) | ||||
| 			t.Fatalf("failed to add tailscale nodes: %s", err) | ||||
| 		} | ||||
| 
 | ||||
| 		if clients := len(scenario.users[user].Clients); clients != 3 { | ||||
| 			t.Errorf("wrong number of tailscale clients: %d != %d", clients, 3) | ||||
| 			t.Fatalf("wrong number of tailscale clients: %d != %d", clients, 3) | ||||
| 		} | ||||
| 
 | ||||
| 		// TODO(kradalby): Test "all" version logic
 | ||||
| 	}) | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to tear down scenario: %s", err) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // If subtests are parallel, then they will start before setup is run.
 | ||||
| @ -127,53 +115,52 @@ func TestTailscaleNodesJoiningHeadcale(t *testing.T) { | ||||
| 	count := 1 | ||||
| 
 | ||||
| 	scenario, err := NewScenario() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create scenario: %s", err) | ||||
| 	} | ||||
| 	assertNoErr(t, err) | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	t.Run("start-headscale", func(t *testing.T) { | ||||
| 		headscale, err := scenario.Headscale() | ||||
| 		if err != nil { | ||||
| 			t.Errorf("failed to create/start headscale: %s", err) | ||||
| 			t.Fatalf("failed to create/start headscale: %s", err) | ||||
| 		} | ||||
| 
 | ||||
| 		err = headscale.WaitForReady() | ||||
| 		err = headscale.WaitForRunning() | ||||
| 		if err != nil { | ||||
| 			t.Errorf("headscale failed to become ready: %s", err) | ||||
| 			t.Fatalf("headscale failed to become ready: %s", err) | ||||
| 		} | ||||
| 	}) | ||||
| 
 | ||||
| 	t.Run("create-user", func(t *testing.T) { | ||||
| 		err := scenario.CreateUser(user) | ||||
| 		if err != nil { | ||||
| 			t.Errorf("failed to create user: %s", err) | ||||
| 			t.Fatalf("failed to create user: %s", err) | ||||
| 		} | ||||
| 
 | ||||
| 		if _, ok := scenario.users[user]; !ok { | ||||
| 			t.Errorf("user is not in scenario") | ||||
| 			t.Fatalf("user is not in scenario") | ||||
| 		} | ||||
| 	}) | ||||
| 
 | ||||
| 	t.Run("create-tailscale", func(t *testing.T) { | ||||
| 		err := scenario.CreateTailscaleNodesInUser(user, "1.30.2", count) | ||||
| 		if err != nil { | ||||
| 			t.Errorf("failed to add tailscale nodes: %s", err) | ||||
| 			t.Fatalf("failed to add tailscale nodes: %s", err) | ||||
| 		} | ||||
| 
 | ||||
| 		if clients := len(scenario.users[user].Clients); clients != count { | ||||
| 			t.Errorf("wrong number of tailscale clients: %d != %d", clients, count) | ||||
| 			t.Fatalf("wrong number of tailscale clients: %d != %d", clients, count) | ||||
| 		} | ||||
| 	}) | ||||
| 
 | ||||
| 	t.Run("join-headscale", func(t *testing.T) { | ||||
| 		key, err := scenario.CreatePreAuthKey(user, true, false) | ||||
| 		if err != nil { | ||||
| 			t.Errorf("failed to create preauthkey: %s", err) | ||||
| 			t.Fatalf("failed to create preauthkey: %s", err) | ||||
| 		} | ||||
| 
 | ||||
| 		headscale, err := scenario.Headscale() | ||||
| 		if err != nil { | ||||
| 			t.Errorf("failed to create/start headscale: %s", err) | ||||
| 			t.Fatalf("failed to create/start headscale: %s", err) | ||||
| 		} | ||||
| 
 | ||||
| 		err = scenario.RunTailscaleUp( | ||||
| @ -182,23 +169,18 @@ func TestTailscaleNodesJoiningHeadcale(t *testing.T) { | ||||
| 			key.GetKey(), | ||||
| 		) | ||||
| 		if err != nil { | ||||
| 			t.Errorf("failed to login: %s", err) | ||||
| 			t.Fatalf("failed to login: %s", err) | ||||
| 		} | ||||
| 	}) | ||||
| 
 | ||||
| 	t.Run("get-ips", func(t *testing.T) { | ||||
| 		ips, err := scenario.GetIPs(user) | ||||
| 		if err != nil { | ||||
| 			t.Errorf("failed to get tailscale ips: %s", err) | ||||
| 			t.Fatalf("failed to get tailscale ips: %s", err) | ||||
| 		} | ||||
| 
 | ||||
| 		if len(ips) != count*2 { | ||||
| 			t.Errorf("got the wrong amount of tailscale ips, %d != %d", len(ips), count*2) | ||||
| 			t.Fatalf("got the wrong amount of tailscale ips, %d != %d", len(ips), count*2) | ||||
| 		} | ||||
| 	}) | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to tear down scenario: %s", err) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| @ -41,65 +41,79 @@ var retry = func(times int, sleepInterval time.Duration, | ||||
| 	return result, stderr, err | ||||
| } | ||||
| 
 | ||||
| func TestSSHOneUserAllToAll(t *testing.T) { | ||||
| 	IntegrationSkip(t) | ||||
| 	t.Parallel() | ||||
| 
 | ||||
| func sshScenario(t *testing.T, policy *policy.ACLPolicy, clientsPerUser int) *Scenario { | ||||
| 	t.Helper() | ||||
| 	scenario, err := NewScenario() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create scenario: %s", err) | ||||
| 	} | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		"user1": len(TailscaleVersions) - 5, | ||||
| 		"user1": clientsPerUser, | ||||
| 		"user2": clientsPerUser, | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.CreateHeadscaleEnv(spec, | ||||
| 		[]tsic.Option{tsic.WithSSH()}, | ||||
| 		hsic.WithACLPolicy( | ||||
| 			&policy.ACLPolicy{ | ||||
| 				Groups: map[string][]string{ | ||||
| 					"group:integration-test": {"user1"}, | ||||
| 				}, | ||||
| 				ACLs: []policy.ACL{ | ||||
| 					{ | ||||
| 						Action:       "accept", | ||||
| 						Sources:      []string{"*"}, | ||||
| 						Destinations: []string{"*:*"}, | ||||
| 					}, | ||||
| 				}, | ||||
| 				SSHs: []policy.SSH{ | ||||
| 					{ | ||||
| 						Action:       "accept", | ||||
| 						Sources:      []string{"group:integration-test"}, | ||||
| 						Destinations: []string{"group:integration-test"}, | ||||
| 						Users:        []string{"ssh-it-user"}, | ||||
| 					}, | ||||
| 				}, | ||||
| 			}, | ||||
| 		), | ||||
| 		[]tsic.Option{ | ||||
| 			tsic.WithDockerEntrypoint([]string{ | ||||
| 				"/bin/sh", | ||||
| 				"-c", | ||||
| 				"/bin/sleep 3 ; apk add openssh ; update-ca-certificates ; tailscaled --tun=tsdev", | ||||
| 			}), | ||||
| 			tsic.WithDockerWorkdir("/"), | ||||
| 		}, | ||||
| 		hsic.WithACLPolicy(policy), | ||||
| 		hsic.WithTestName("ssh"), | ||||
| 		hsic.WithConfigEnv(map[string]string{ | ||||
| 			"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1", | ||||
| 		}), | ||||
| 	) | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create headscale environment: %s", err) | ||||
| 	} | ||||
| 
 | ||||
| 	allClients, err := scenario.ListTailscaleClients() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	err = scenario.WaitForTailscaleSync() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed wait for tailscale clients to be in sync: %s", err) | ||||
| 	} | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	_, err = scenario.ListTailscaleClientsFQDNs() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get FQDNs: %s", err) | ||||
| 	} | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	return scenario | ||||
| } | ||||
| 
 | ||||
| func TestSSHOneUserAllToAll(t *testing.T) { | ||||
| 	IntegrationSkip(t) | ||||
| 	t.Parallel() | ||||
| 
 | ||||
| 	scenario := sshScenario(t, | ||||
| 		&policy.ACLPolicy{ | ||||
| 			Groups: map[string][]string{ | ||||
| 				"group:integration-test": {"user1"}, | ||||
| 			}, | ||||
| 			ACLs: []policy.ACL{ | ||||
| 				{ | ||||
| 					Action:       "accept", | ||||
| 					Sources:      []string{"*"}, | ||||
| 					Destinations: []string{"*:*"}, | ||||
| 				}, | ||||
| 			}, | ||||
| 			SSHs: []policy.SSH{ | ||||
| 				{ | ||||
| 					Action:       "accept", | ||||
| 					Sources:      []string{"group:integration-test"}, | ||||
| 					Destinations: []string{"group:integration-test"}, | ||||
| 					Users:        []string{"ssh-it-user"}, | ||||
| 				}, | ||||
| 			}, | ||||
| 		}, | ||||
| 		len(TailscaleVersions)-5, | ||||
| 	) | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	allClients, err := scenario.ListTailscaleClients() | ||||
| 	assertNoErrListClients(t, err) | ||||
| 
 | ||||
| 	err = scenario.WaitForTailscaleSync() | ||||
| 	assertNoErrSync(t, err) | ||||
| 
 | ||||
| 	_, err = scenario.ListTailscaleClientsFQDNs() | ||||
| 	assertNoErrListFQDN(t, err) | ||||
| 
 | ||||
| 	for _, client := range allClients { | ||||
| 		for _, peer := range allClients { | ||||
| @ -110,78 +124,48 @@ func TestSSHOneUserAllToAll(t *testing.T) { | ||||
| 			assertSSHHostname(t, client, peer) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to tear down scenario: %s", err) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func TestSSHMultipleUsersAllToAll(t *testing.T) { | ||||
| 	IntegrationSkip(t) | ||||
| 	t.Parallel() | ||||
| 
 | ||||
| 	scenario, err := NewScenario() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create scenario: %s", err) | ||||
| 	} | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		"user1": len(TailscaleVersions) - 5, | ||||
| 		"user2": len(TailscaleVersions) - 5, | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.CreateHeadscaleEnv(spec, | ||||
| 		[]tsic.Option{tsic.WithSSH()}, | ||||
| 		hsic.WithACLPolicy( | ||||
| 			&policy.ACLPolicy{ | ||||
| 				Groups: map[string][]string{ | ||||
| 					"group:integration-test": {"user1", "user2"}, | ||||
| 				}, | ||||
| 				ACLs: []policy.ACL{ | ||||
| 					{ | ||||
| 						Action:       "accept", | ||||
| 						Sources:      []string{"*"}, | ||||
| 						Destinations: []string{"*:*"}, | ||||
| 					}, | ||||
| 				}, | ||||
| 				SSHs: []policy.SSH{ | ||||
| 					{ | ||||
| 						Action:       "accept", | ||||
| 						Sources:      []string{"group:integration-test"}, | ||||
| 						Destinations: []string{"group:integration-test"}, | ||||
| 						Users:        []string{"ssh-it-user"}, | ||||
| 					}, | ||||
| 	scenario := sshScenario(t, | ||||
| 		&policy.ACLPolicy{ | ||||
| 			Groups: map[string][]string{ | ||||
| 				"group:integration-test": {"user1", "user2"}, | ||||
| 			}, | ||||
| 			ACLs: []policy.ACL{ | ||||
| 				{ | ||||
| 					Action:       "accept", | ||||
| 					Sources:      []string{"*"}, | ||||
| 					Destinations: []string{"*:*"}, | ||||
| 				}, | ||||
| 			}, | ||||
| 		), | ||||
| 		hsic.WithConfigEnv(map[string]string{ | ||||
| 			"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1", | ||||
| 		}), | ||||
| 			SSHs: []policy.SSH{ | ||||
| 				{ | ||||
| 					Action:       "accept", | ||||
| 					Sources:      []string{"group:integration-test"}, | ||||
| 					Destinations: []string{"group:integration-test"}, | ||||
| 					Users:        []string{"ssh-it-user"}, | ||||
| 				}, | ||||
| 			}, | ||||
| 		}, | ||||
| 		len(TailscaleVersions)-5, | ||||
| 	) | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create headscale environment: %s", err) | ||||
| 	} | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	nsOneClients, err := scenario.ListTailscaleClients("user1") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClients(t, err) | ||||
| 
 | ||||
| 	nsTwoClients, err := scenario.ListTailscaleClients("user2") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClients(t, err) | ||||
| 
 | ||||
| 	err = scenario.WaitForTailscaleSync() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed wait for tailscale clients to be in sync: %s", err) | ||||
| 	} | ||||
| 	assertNoErrSync(t, err) | ||||
| 
 | ||||
| 	_, err = scenario.ListTailscaleClientsFQDNs() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get FQDNs: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListFQDN(t, err) | ||||
| 
 | ||||
| 	testInterUserSSH := func(sourceClients []TailscaleClient, targetClients []TailscaleClient) { | ||||
| 		for _, client := range sourceClients { | ||||
| @ -193,66 +177,38 @@ func TestSSHMultipleUsersAllToAll(t *testing.T) { | ||||
| 
 | ||||
| 	testInterUserSSH(nsOneClients, nsTwoClients) | ||||
| 	testInterUserSSH(nsTwoClients, nsOneClients) | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to tear down scenario: %s", err) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func TestSSHNoSSHConfigured(t *testing.T) { | ||||
| 	IntegrationSkip(t) | ||||
| 	t.Parallel() | ||||
| 
 | ||||
| 	scenario, err := NewScenario() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create scenario: %s", err) | ||||
| 	} | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		"user1": len(TailscaleVersions) - 5, | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.CreateHeadscaleEnv(spec, | ||||
| 		[]tsic.Option{tsic.WithSSH()}, | ||||
| 		hsic.WithACLPolicy( | ||||
| 			&policy.ACLPolicy{ | ||||
| 				Groups: map[string][]string{ | ||||
| 					"group:integration-test": {"user1"}, | ||||
| 				}, | ||||
| 				ACLs: []policy.ACL{ | ||||
| 					{ | ||||
| 						Action:       "accept", | ||||
| 						Sources:      []string{"*"}, | ||||
| 						Destinations: []string{"*:*"}, | ||||
| 					}, | ||||
| 				}, | ||||
| 				SSHs: []policy.SSH{}, | ||||
| 	scenario := sshScenario(t, | ||||
| 		&policy.ACLPolicy{ | ||||
| 			Groups: map[string][]string{ | ||||
| 				"group:integration-test": {"user1"}, | ||||
| 			}, | ||||
| 		), | ||||
| 		hsic.WithTestName("sshnoneconfigured"), | ||||
| 		hsic.WithConfigEnv(map[string]string{ | ||||
| 			"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1", | ||||
| 		}), | ||||
| 			ACLs: []policy.ACL{ | ||||
| 				{ | ||||
| 					Action:       "accept", | ||||
| 					Sources:      []string{"*"}, | ||||
| 					Destinations: []string{"*:*"}, | ||||
| 				}, | ||||
| 			}, | ||||
| 			SSHs: []policy.SSH{}, | ||||
| 		}, | ||||
| 		len(TailscaleVersions)-5, | ||||
| 	) | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create headscale environment: %s", err) | ||||
| 	} | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	allClients, err := scenario.ListTailscaleClients() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClients(t, err) | ||||
| 
 | ||||
| 	err = scenario.WaitForTailscaleSync() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed wait for tailscale clients to be in sync: %s", err) | ||||
| 	} | ||||
| 	assertNoErrSync(t, err) | ||||
| 
 | ||||
| 	_, err = scenario.ListTailscaleClientsFQDNs() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get FQDNs: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListFQDN(t, err) | ||||
| 
 | ||||
| 	for _, client := range allClients { | ||||
| 		for _, peer := range allClients { | ||||
| @ -263,73 +219,45 @@ func TestSSHNoSSHConfigured(t *testing.T) { | ||||
| 			assertSSHPermissionDenied(t, client, peer) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to tear down scenario: %s", err) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func TestSSHIsBlockedInACL(t *testing.T) { | ||||
| 	IntegrationSkip(t) | ||||
| 	t.Parallel() | ||||
| 
 | ||||
| 	scenario, err := NewScenario() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create scenario: %s", err) | ||||
| 	} | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		"user1": len(TailscaleVersions) - 5, | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.CreateHeadscaleEnv(spec, | ||||
| 		[]tsic.Option{tsic.WithSSH()}, | ||||
| 		hsic.WithACLPolicy( | ||||
| 			&policy.ACLPolicy{ | ||||
| 				Groups: map[string][]string{ | ||||
| 					"group:integration-test": {"user1"}, | ||||
| 				}, | ||||
| 				ACLs: []policy.ACL{ | ||||
| 					{ | ||||
| 						Action:       "accept", | ||||
| 						Sources:      []string{"*"}, | ||||
| 						Destinations: []string{"*:80"}, | ||||
| 					}, | ||||
| 				}, | ||||
| 				SSHs: []policy.SSH{ | ||||
| 					{ | ||||
| 						Action:       "accept", | ||||
| 						Sources:      []string{"group:integration-test"}, | ||||
| 						Destinations: []string{"group:integration-test"}, | ||||
| 						Users:        []string{"ssh-it-user"}, | ||||
| 					}, | ||||
| 	scenario := sshScenario(t, | ||||
| 		&policy.ACLPolicy{ | ||||
| 			Groups: map[string][]string{ | ||||
| 				"group:integration-test": {"user1"}, | ||||
| 			}, | ||||
| 			ACLs: []policy.ACL{ | ||||
| 				{ | ||||
| 					Action:       "accept", | ||||
| 					Sources:      []string{"*"}, | ||||
| 					Destinations: []string{"*:80"}, | ||||
| 				}, | ||||
| 			}, | ||||
| 		), | ||||
| 		hsic.WithTestName("sshisblockedinacl"), | ||||
| 		hsic.WithConfigEnv(map[string]string{ | ||||
| 			"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1", | ||||
| 		}), | ||||
| 			SSHs: []policy.SSH{ | ||||
| 				{ | ||||
| 					Action:       "accept", | ||||
| 					Sources:      []string{"group:integration-test"}, | ||||
| 					Destinations: []string{"group:integration-test"}, | ||||
| 					Users:        []string{"ssh-it-user"}, | ||||
| 				}, | ||||
| 			}, | ||||
| 		}, | ||||
| 		len(TailscaleVersions)-5, | ||||
| 	) | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create headscale environment: %s", err) | ||||
| 	} | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	allClients, err := scenario.ListTailscaleClients() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListClients(t, err) | ||||
| 
 | ||||
| 	err = scenario.WaitForTailscaleSync() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed wait for tailscale clients to be in sync: %s", err) | ||||
| 	} | ||||
| 	assertNoErrSync(t, err) | ||||
| 
 | ||||
| 	_, err = scenario.ListTailscaleClientsFQDNs() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get FQDNs: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListFQDN(t, err) | ||||
| 
 | ||||
| 	for _, client := range allClients { | ||||
| 		for _, peer := range allClients { | ||||
| @ -340,86 +268,55 @@ func TestSSHIsBlockedInACL(t *testing.T) { | ||||
| 			assertSSHTimeout(t, client, peer) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to tear down scenario: %s", err) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func TestSSUserOnlyIsolation(t *testing.T) { | ||||
| 	IntegrationSkip(t) | ||||
| 	t.Parallel() | ||||
| 
 | ||||
| 	scenario, err := NewScenario() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create scenario: %s", err) | ||||
| 	} | ||||
| 
 | ||||
| 	spec := map[string]int{ | ||||
| 		"useracl1": len(TailscaleVersions) - 5, | ||||
| 		"useracl2": len(TailscaleVersions) - 5, | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.CreateHeadscaleEnv(spec, | ||||
| 		[]tsic.Option{tsic.WithSSH()}, | ||||
| 		hsic.WithACLPolicy( | ||||
| 			&policy.ACLPolicy{ | ||||
| 				Groups: map[string][]string{ | ||||
| 					"group:ssh1": {"useracl1"}, | ||||
| 					"group:ssh2": {"useracl2"}, | ||||
| 				}, | ||||
| 				ACLs: []policy.ACL{ | ||||
| 					{ | ||||
| 						Action:       "accept", | ||||
| 						Sources:      []string{"*"}, | ||||
| 						Destinations: []string{"*:*"}, | ||||
| 					}, | ||||
| 				}, | ||||
| 				SSHs: []policy.SSH{ | ||||
| 					{ | ||||
| 						Action:       "accept", | ||||
| 						Sources:      []string{"group:ssh1"}, | ||||
| 						Destinations: []string{"group:ssh1"}, | ||||
| 						Users:        []string{"ssh-it-user"}, | ||||
| 					}, | ||||
| 					{ | ||||
| 						Action:       "accept", | ||||
| 						Sources:      []string{"group:ssh2"}, | ||||
| 						Destinations: []string{"group:ssh2"}, | ||||
| 						Users:        []string{"ssh-it-user"}, | ||||
| 					}, | ||||
| 	scenario := sshScenario(t, | ||||
| 		&policy.ACLPolicy{ | ||||
| 			Groups: map[string][]string{ | ||||
| 				"group:ssh1": {"user1"}, | ||||
| 				"group:ssh2": {"user2"}, | ||||
| 			}, | ||||
| 			ACLs: []policy.ACL{ | ||||
| 				{ | ||||
| 					Action:       "accept", | ||||
| 					Sources:      []string{"*"}, | ||||
| 					Destinations: []string{"*:*"}, | ||||
| 				}, | ||||
| 			}, | ||||
| 		), | ||||
| 		hsic.WithTestName("sshtwouseraclblock"), | ||||
| 		hsic.WithConfigEnv(map[string]string{ | ||||
| 			"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1", | ||||
| 		}), | ||||
| 			SSHs: []policy.SSH{ | ||||
| 				{ | ||||
| 					Action:       "accept", | ||||
| 					Sources:      []string{"group:ssh1"}, | ||||
| 					Destinations: []string{"group:ssh1"}, | ||||
| 					Users:        []string{"ssh-it-user"}, | ||||
| 				}, | ||||
| 				{ | ||||
| 					Action:       "accept", | ||||
| 					Sources:      []string{"group:ssh2"}, | ||||
| 					Destinations: []string{"group:ssh2"}, | ||||
| 					Users:        []string{"ssh-it-user"}, | ||||
| 				}, | ||||
| 			}, | ||||
| 		}, | ||||
| 		len(TailscaleVersions)-5, | ||||
| 	) | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to create headscale environment: %s", err) | ||||
| 	} | ||||
| 	defer scenario.Shutdown() | ||||
| 
 | ||||
| 	ssh1Clients, err := scenario.ListTailscaleClients("useracl1") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	ssh1Clients, err := scenario.ListTailscaleClients("user1") | ||||
| 	assertNoErrListClients(t, err) | ||||
| 
 | ||||
| 	ssh2Clients, err := scenario.ListTailscaleClients("useracl2") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get clients: %s", err) | ||||
| 	} | ||||
| 	ssh2Clients, err := scenario.ListTailscaleClients("user2") | ||||
| 	assertNoErrListClients(t, err) | ||||
| 
 | ||||
| 	err = scenario.WaitForTailscaleSync() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed wait for tailscale clients to be in sync: %s", err) | ||||
| 	} | ||||
| 	assertNoErrSync(t, err) | ||||
| 
 | ||||
| 	_, err = scenario.ListTailscaleClientsFQDNs() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get FQDNs: %s", err) | ||||
| 	} | ||||
| 	assertNoErrListFQDN(t, err) | ||||
| 
 | ||||
| 	for _, client := range ssh1Clients { | ||||
| 		for _, peer := range ssh2Clients { | ||||
| @ -460,11 +357,6 @@ func TestSSUserOnlyIsolation(t *testing.T) { | ||||
| 			assertSSHHostname(t, client, peer) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	err = scenario.Shutdown() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to tear down scenario: %s", err) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func doSSH(t *testing.T, client TailscaleClient, peer TailscaleClient) (string, string, error) { | ||||
| @ -487,7 +379,7 @@ func assertSSHHostname(t *testing.T, client TailscaleClient, peer TailscaleClien | ||||
| 	t.Helper() | ||||
| 
 | ||||
| 	result, _, err := doSSH(t, client, peer) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	assert.Contains(t, peer.ID(), strings.ReplaceAll(result, "\n", "")) | ||||
| } | ||||
| @ -507,7 +399,7 @@ func assertSSHTimeout(t *testing.T, client TailscaleClient, peer TailscaleClient | ||||
| 	t.Helper() | ||||
| 
 | ||||
| 	result, stderr, err := doSSH(t, client, peer) | ||||
| 	assert.NoError(t, err) | ||||
| 	assertNoErr(t, err) | ||||
| 
 | ||||
| 	assert.Empty(t, result) | ||||
| 
 | ||||
|  | ||||
| @ -14,14 +14,18 @@ type TailscaleClient interface { | ||||
| 	Hostname() string | ||||
| 	Shutdown() error | ||||
| 	Version() string | ||||
| 	Execute(command []string, options ...dockertestutil.ExecuteCommandOption) (string, string, error) | ||||
| 	Up(loginServer, authKey string) error | ||||
| 	UpWithLoginURL(loginServer string) (*url.URL, error) | ||||
| 	Execute( | ||||
| 		command []string, | ||||
| 		options ...dockertestutil.ExecuteCommandOption, | ||||
| 	) (string, string, error) | ||||
| 	Login(loginServer, authKey string) error | ||||
| 	LoginWithURL(loginServer string) (*url.URL, error) | ||||
| 	Logout() error | ||||
| 	IPs() ([]netip.Addr, error) | ||||
| 	FQDN() (string, error) | ||||
| 	Status() (*ipnstate.Status, error) | ||||
| 	WaitForReady() error | ||||
| 	WaitForNeedsLogin() error | ||||
| 	WaitForRunning() error | ||||
| 	WaitForLogout() error | ||||
| 	WaitForPeers(expected int) error | ||||
| 	Ping(hostnameOrIP string, opts ...tsic.PingOption) error | ||||
|  | ||||
| @ -34,9 +34,14 @@ var ( | ||||
| 	errTailscaleWrongPeerCount         = errors.New("wrong peer count") | ||||
| 	errTailscaleCannotUpWithoutAuthkey = errors.New("cannot up without authkey") | ||||
| 	errTailscaleNotConnected           = errors.New("tailscale not connected") | ||||
| 	errTailscaledNotReadyForLogin      = errors.New("tailscaled not ready for login") | ||||
| 	errTailscaleNotLoggedOut           = errors.New("tailscale not logged out") | ||||
| ) | ||||
| 
 | ||||
| func errTailscaleStatus(hostname string, err error) error { | ||||
| 	return fmt.Errorf("%s failed to fetch tailscale status: %w", hostname, err) | ||||
| } | ||||
| 
 | ||||
| // TailscaleInContainer is an implementation of TailscaleClient which
 | ||||
| // sets up a Tailscale instance inside a container.
 | ||||
| type TailscaleInContainer struct { | ||||
| @ -165,7 +170,7 @@ func New( | ||||
| 		network: network, | ||||
| 
 | ||||
| 		withEntrypoint: []string{ | ||||
| 			"/bin/bash", | ||||
| 			"/bin/sh", | ||||
| 			"-c", | ||||
| 			"/bin/sleep 3 ; update-ca-certificates ; tailscaled --tun=tsdev", | ||||
| 		}, | ||||
| @ -204,16 +209,48 @@ func New( | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	container, err := pool.BuildAndRunWithBuildOptions( | ||||
| 		createTailscaleBuildOptions(version), | ||||
| 		tailscaleOptions, | ||||
| 		dockertestutil.DockerRestartPolicy, | ||||
| 		dockertestutil.DockerAllowLocalIPv6, | ||||
| 		dockertestutil.DockerAllowNetworkAdministration, | ||||
| 	) | ||||
| 	var container *dockertest.Resource | ||||
| 	switch version { | ||||
| 	case "head": | ||||
| 		buildOptions := &dockertest.BuildOptions{ | ||||
| 			Dockerfile: "Dockerfile.tailscale-HEAD", | ||||
| 			ContextDir: dockerContextPath, | ||||
| 			BuildArgs:  []docker.BuildArg{}, | ||||
| 		} | ||||
| 
 | ||||
| 		container, err = pool.BuildAndRunWithBuildOptions( | ||||
| 			buildOptions, | ||||
| 			tailscaleOptions, | ||||
| 			dockertestutil.DockerRestartPolicy, | ||||
| 			dockertestutil.DockerAllowLocalIPv6, | ||||
| 			dockertestutil.DockerAllowNetworkAdministration, | ||||
| 		) | ||||
| 	case "unstable": | ||||
| 		tailscaleOptions.Repository = "tailscale/tailscale" | ||||
| 		tailscaleOptions.Tag = version | ||||
| 
 | ||||
| 		container, err = pool.RunWithOptions( | ||||
| 			tailscaleOptions, | ||||
| 			dockertestutil.DockerRestartPolicy, | ||||
| 			dockertestutil.DockerAllowLocalIPv6, | ||||
| 			dockertestutil.DockerAllowNetworkAdministration, | ||||
| 		) | ||||
| 	default: | ||||
| 		tailscaleOptions.Repository = "tailscale/tailscale" | ||||
| 		tailscaleOptions.Tag = "v" + version | ||||
| 
 | ||||
| 		container, err = pool.RunWithOptions( | ||||
| 			tailscaleOptions, | ||||
| 			dockertestutil.DockerRestartPolicy, | ||||
| 			dockertestutil.DockerAllowLocalIPv6, | ||||
| 			dockertestutil.DockerAllowNetworkAdministration, | ||||
| 		) | ||||
| 	} | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf( | ||||
| 			"could not start tailscale container (version: %s): %w", | ||||
| 			"%s could not start tailscale container (version: %s): %w", | ||||
| 			hostname, | ||||
| 			version, | ||||
| 			err, | ||||
| 		) | ||||
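This switch replaces the old build-everything approach (note createTailscaleBuildOptions being deleted at the end of this file): only "head" still builds an image locally from Dockerfile.tailscale-HEAD, while "unstable" and pinned versions pull the official tailscale/tailscale image. The mapping, summarized as a hypothetical helper (not in the commit):

```go
// imageFor returns the repository:tag pulled for a given version string,
// or "" for "head", which is built from Dockerfile.tailscale-HEAD instead.
func imageFor(version string) string {
	switch version {
	case "head":
		return "" // built locally via pool.BuildAndRunWithBuildOptions
	case "unstable":
		return "tailscale/tailscale:unstable"
	default:
		return "tailscale/tailscale:v" + version // e.g. "1.30.2" -> ":v1.30.2"
	}
}
```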
| @ -270,7 +307,7 @@ func (t *TailscaleInContainer) Execute( | ||||
| 		options..., | ||||
| 	) | ||||
| 	if err != nil { | ||||
| 		log.Printf("command stderr: %s\n", stderr) | ||||
| 		// log.Printf("command stderr: %s\n", stderr)
 | ||||
| 
 | ||||
| 		if stdout != "" { | ||||
| 			log.Printf("command stdout: %s\n", stdout) | ||||
| @ -288,18 +325,15 @@ func (t *TailscaleInContainer) Execute( | ||||
| 
 | ||||
| // Login runs the login routine on the given Tailscale instance.
 | ||||
| // This login mechanism uses the authorised key for authentication.
 | ||||
| func (t *TailscaleInContainer) Up( | ||||
| func (t *TailscaleInContainer) Login( | ||||
| 	loginServer, authKey string, | ||||
| ) error { | ||||
| 	command := []string{ | ||||
| 		"tailscale", | ||||
| 		"up", | ||||
| 		"-login-server", | ||||
| 		loginServer, | ||||
| 		"--authkey", | ||||
| 		authKey, | ||||
| 		"--hostname", | ||||
| 		t.hostname, | ||||
| 		"--login-server=" + loginServer, | ||||
| 		"--authkey=" + authKey, | ||||
| 		"--hostname=" + t.hostname, | ||||
| 	} | ||||
| 
 | ||||
| 	if t.withSSH { | ||||
| @ -313,7 +347,12 @@ func (t *TailscaleInContainer) Up( | ||||
| 	} | ||||
| 
 | ||||
| 	if _, _, err := t.Execute(command); err != nil { | ||||
| 		return fmt.Errorf("failed to join tailscale client: %w", err) | ||||
| 		return fmt.Errorf( | ||||
| 			"%s failed to join tailscale client (%s): %w", | ||||
| 			t.hostname, | ||||
| 			strings.Join(command, " "), | ||||
| 			err, | ||||
| 		) | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
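The rewritten argument list uses the single-token --flag=value form, so each flag and its value travel as one argv entry into the container. With illustrative values (the real server URL and key come from the scenario), the slice built above looks roughly like:

```go
// Illustrative only; hostname, URL, and key are made-up placeholders.
command := []string{
	"tailscale",
	"up",
	"--login-server=https://headscale.example.com",
	"--authkey=<redacted>",
	"--hostname=ts-1",
}
```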
| @ -321,16 +360,14 @@ func (t *TailscaleInContainer) Up( | ||||
| 
 | ||||
| // LoginWithURL runs the login routine on the given Tailscale instance.
 | ||||
| // This login mechanism uses the web + command line flow for authentication.
 | ||||
| func (t *TailscaleInContainer) UpWithLoginURL( | ||||
| func (t *TailscaleInContainer) LoginWithURL( | ||||
| 	loginServer string, | ||||
| ) (*url.URL, error) { | ||||
| 	command := []string{ | ||||
| 		"tailscale", | ||||
| 		"up", | ||||
| 		"-login-server", | ||||
| 		loginServer, | ||||
| 		"--hostname", | ||||
| 		t.hostname, | ||||
| 		"--login-server=" + loginServer, | ||||
| 		"--hostname=" + t.hostname, | ||||
| 	} | ||||
| 
 | ||||
| 	_, stderr, err := t.Execute(command) | ||||
| @ -378,7 +415,7 @@ func (t *TailscaleInContainer) IPs() ([]netip.Addr, error) { | ||||
| 
 | ||||
| 	result, _, err := t.Execute(command) | ||||
| 	if err != nil { | ||||
| 		return []netip.Addr{}, fmt.Errorf("failed to fetch tailscale IPs: %w", err) | ||||
| 		return []netip.Addr{}, fmt.Errorf("%s failed to fetch tailscale IPs: %w", t.hostname, err) | ||||
| 	} | ||||
| 
 | ||||
| 	for _, address := range strings.Split(result, "\n") { | ||||
| @ -432,19 +469,37 @@ func (t *TailscaleInContainer) FQDN() (string, error) { | ||||
| 	return status.Self.DNSName, nil | ||||
| } | ||||
| 
 | ||||
| // WaitForReady blocks until the Tailscale (tailscaled) instance is ready
 | ||||
| // to login or be used.
 | ||||
| func (t *TailscaleInContainer) WaitForReady() error { | ||||
| // WaitForNeedsLogin blocks until the Tailscale (tailscaled) instance has
 | ||||
| // started and needs to be logged into.
 | ||||
| func (t *TailscaleInContainer) WaitForNeedsLogin() error { | ||||
| 	return t.pool.Retry(func() error { | ||||
| 		status, err := t.Status() | ||||
| 		if err != nil { | ||||
| 			return fmt.Errorf("failed to fetch tailscale status: %w", err) | ||||
| 			return errTailscaleStatus(t.hostname, err) | ||||
| 		} | ||||
| 
 | ||||
| 		if status.CurrentTailnet != nil { | ||||
| 		// ipnstate.Status.CurrentTailnet was added in Tailscale 1.22.0
 | ||||
| 		// https://github.com/tailscale/tailscale/pull/3865
 | ||||
| 		//
 | ||||
| 		// Before that, we can check the BackendState to see if the
 | ||||
| 		// tailscaled daemon is connected to the control system.
 | ||||
| 		if status.BackendState == "NeedsLogin" { | ||||
| 			return nil | ||||
| 		} | ||||
| 
 | ||||
| 		return errTailscaledNotReadyForLogin | ||||
| 	}) | ||||
| } | ||||
| 
 | ||||
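Both waiters poll the daemon through Status(). BackendState is a plain string field on ipnstate.Status whose values include "NeedsLogin", "Starting", and "Running", which is why it works on Tailscale releases older than 1.22.0 where CurrentTailnet does not exist. If the two waiters were ever folded together, a consolidation could look like this hypothetical helper (an assumption, not code from this commit; WaitForRunning as shipped also checks CurrentTailnet on newer releases):

```go
// waitForBackendState polls until tailscaled reports the wanted state.
func waitForBackendState(t *TailscaleInContainer, want string) error {
	return t.pool.Retry(func() error {
		status, err := t.Status()
		if err != nil {
			return errTailscaleStatus(t.hostname, err)
		}
		if status.BackendState == want {
			return nil // e.g. "NeedsLogin" right after start, "Running" once logged in
		}

		return fmt.Errorf("%s: backend state %q, want %q",
			t.hostname, status.BackendState, want)
	})
}
```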
| // WaitForRunning blocks until the Tailscale (tailscaled) instance is logged in
 | ||||
| // and ready to be used.
 | ||||
| func (t *TailscaleInContainer) WaitForRunning() error { | ||||
| 	return t.pool.Retry(func() error { | ||||
| 		status, err := t.Status() | ||||
| 		if err != nil { | ||||
| 			return errTailscaleStatus(t.hostname, err) | ||||
| 		} | ||||
| 
 | ||||
| 		// ipnstate.Status.CurrentTailnet was added in Tailscale 1.22.0
 | ||||
| 		// https://github.com/tailscale/tailscale/pull/3865
 | ||||
| 		//
 | ||||
| @ -460,10 +515,10 @@ func (t *TailscaleInContainer) WaitForReady() error { | ||||
| 
 | ||||
| // WaitForLogout blocks until the Tailscale instance has logged out.
 | ||||
| func (t *TailscaleInContainer) WaitForLogout() error { | ||||
| 	return t.pool.Retry(func() error { | ||||
| 	err := t.pool.Retry(func() error { | ||||
| 		status, err := t.Status() | ||||
| 		if err != nil { | ||||
| 			return fmt.Errorf("failed to fetch tailscale status: %w", err) | ||||
| 			return errTailscaleStatus(t.hostname, err) | ||||
| 		} | ||||
| 
 | ||||
| 		if status.CurrentTailnet == nil { | ||||
| @ -471,7 +526,7 @@ func (t *TailscaleInContainer) WaitForLogout() error { | ||||
| 		} | ||||
| 
 | ||||
| 		return errTailscaleNotLoggedOut | ||||
| 	}) | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		return fmt.Errorf("%s err: %w", t.hostname, err) | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // WaitForPeers blocks until N number of peers is present in the
 | ||||
| @ -480,11 +535,17 @@ func (t *TailscaleInContainer) WaitForPeers(expected int) error { | ||||
| 	return t.pool.Retry(func() error { | ||||
| 		status, err := t.Status() | ||||
| 		if err != nil { | ||||
| 			return fmt.Errorf("failed to fetch tailscale status: %w", err) | ||||
| 			return errTailscaleStatus(t.hostname, err) | ||||
| 		} | ||||
| 
 | ||||
| 		if peers := status.Peers(); len(peers) != expected { | ||||
| 			return errTailscaleWrongPeerCount | ||||
| 			return fmt.Errorf( | ||||
| 				"%s err: %w, expected %d, got %d", | ||||
| 				t.hostname, | ||||
| 				errTailscaleWrongPeerCount, | ||||
| 				expected, | ||||
| 				len(peers), | ||||
| 			) | ||||
| 		} | ||||
| 
 | ||||
| 		return nil | ||||
| @ -683,47 +744,3 @@ func (t *TailscaleInContainer) Curl(url string, opts ...CurlOption) (string, err | ||||
| func (t *TailscaleInContainer) WriteFile(path string, data []byte) error { | ||||
| 	return integrationutil.WriteFileToContainer(t.pool, t.container, path, data) | ||||
| } | ||||
| 
 | ||||
| func createTailscaleBuildOptions(version string) *dockertest.BuildOptions { | ||||
| 	var tailscaleBuildOptions *dockertest.BuildOptions | ||||
| 	switch version { | ||||
| 	case "head": | ||||
| 		tailscaleBuildOptions = &dockertest.BuildOptions{ | ||||
| 			Dockerfile: "Dockerfile.tailscale-HEAD", | ||||
| 			ContextDir: dockerContextPath, | ||||
| 			BuildArgs:  []docker.BuildArg{}, | ||||
| 		} | ||||
| 	case "unstable": | ||||
| 		tailscaleBuildOptions = &dockertest.BuildOptions{ | ||||
| 			Dockerfile: "Dockerfile.tailscale", | ||||
| 			ContextDir: dockerContextPath, | ||||
| 			BuildArgs: []docker.BuildArg{ | ||||
| 				{ | ||||
| 					Name:  "TAILSCALE_VERSION", | ||||
| 					Value: "*", // Installs the latest version https://askubuntu.com/a/824926
 | ||||
| 				}, | ||||
| 				{ | ||||
| 					Name:  "TAILSCALE_CHANNEL", | ||||
| 					Value: "unstable", | ||||
| 				}, | ||||
| 			}, | ||||
| 		} | ||||
| 	default: | ||||
| 		tailscaleBuildOptions = &dockertest.BuildOptions{ | ||||
| 			Dockerfile: "Dockerfile.tailscale", | ||||
| 			ContextDir: dockerContextPath, | ||||
| 			BuildArgs: []docker.BuildArg{ | ||||
| 				{ | ||||
| 					Name:  "TAILSCALE_VERSION", | ||||
| 					Value: version, | ||||
| 				}, | ||||
| 				{ | ||||
| 					Name:  "TAILSCALE_CHANNEL", | ||||
| 					Value: "stable", | ||||
| 				}, | ||||
| 			}, | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return tailscaleBuildOptions | ||||
| } | ||||
|  | ||||
| @ -12,6 +12,53 @@ const ( | ||||
| 	derpPingCount   = 10 | ||||
| ) | ||||
| 
 | ||||
| func assertNoErr(t *testing.T, err error) { | ||||
| 	t.Helper() | ||||
| 	assertNoErrf(t, "unexpected error: %s", err) | ||||
| } | ||||
| 
 | ||||
| func assertNoErrf(t *testing.T, msg string, err error) { | ||||
| 	t.Helper() | ||||
| 	if err != nil { | ||||
| 		t.Fatalf(msg, err) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func assertNoErrHeadscaleEnv(t *testing.T, err error) { | ||||
| 	t.Helper() | ||||
| 	assertNoErrf(t, "failed to create headscale environment: %s", err) | ||||
| } | ||||
| 
 | ||||
| func assertNoErrGetHeadscale(t *testing.T, err error) { | ||||
| 	t.Helper() | ||||
| 	assertNoErrf(t, "failed to get headscale: %s", err) | ||||
| } | ||||
| 
 | ||||
| func assertNoErrListClients(t *testing.T, err error) { | ||||
| 	t.Helper() | ||||
| 	assertNoErrf(t, "failed to list clients: %s", err) | ||||
| } | ||||
| 
 | ||||
| func assertNoErrListClientIPs(t *testing.T, err error) { | ||||
| 	t.Helper() | ||||
| 	assertNoErrf(t, "failed to get client IPs: %s", err) | ||||
| } | ||||
| 
 | ||||
| func assertNoErrSync(t *testing.T, err error) { | ||||
| 	t.Helper() | ||||
| 	assertNoErrf(t, "failed to have all clients sync up: %s", err) | ||||
| } | ||||
| 
 | ||||
| func assertNoErrListFQDN(t *testing.T, err error) { | ||||
| 	t.Helper() | ||||
| 	assertNoErrf(t, "failed to list FQDNs: %s", err) | ||||
| } | ||||
| 
 | ||||
| func assertNoErrLogout(t *testing.T, err error) { | ||||
| 	t.Helper() | ||||
| 	assertNoErrf(t, "failed to log out tailscale nodes: %s", err) | ||||
| } | ||||
| 
 | ||||
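Each wrapper calls t.Helper() first, so a failure inside assertNoErrf is reported at the assertion's call site in the test rather than at the helper's own t.Fatalf line. A hypothetical test showing the effect (assumes "os" and "testing" are imported):

```go
func TestHelperReporting(t *testing.T) {
	_, err := os.Open("/does/not/exist") // a step expected to succeed
	// If this fails, `go test` points at the next line, not at the helper
	// file, because assertNoErr/assertNoErrf mark themselves as helpers.
	assertNoErr(t, err)
}
```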
| func pingAllHelper(t *testing.T, clients []TailscaleClient, addrs []string) int { | ||||
| 	t.Helper() | ||||
| 	success := 0 | ||||
| @ -20,7 +67,7 @@ func pingAllHelper(t *testing.T, clients []TailscaleClient, addrs []string) int | ||||
| 		for _, addr := range addrs { | ||||
| 			err := client.Ping(addr) | ||||
| 			if err != nil { | ||||
| 				t.Errorf("failed to ping %s from %s: %s", addr, client.Hostname(), err) | ||||
| 				t.Fatalf("failed to ping %s from %s: %s", addr, client.Hostname(), err) | ||||
| 			} else { | ||||
| 				success++ | ||||
| 			} | ||||
| @ -47,7 +94,7 @@ func pingDerpAllHelper(t *testing.T, clients []TailscaleClient, addrs []string) | ||||
| 				tsic.WithPingUntilDirect(false), | ||||
| 			) | ||||
| 			if err != nil { | ||||
| 				t.Errorf("failed to ping %s from %s: %s", addr, client.Hostname(), err) | ||||
| 				t.Fatalf("failed to ping %s from %s: %s", addr, client.Hostname(), err) | ||||
| 			} else { | ||||
| 				success++ | ||||
| 			} | ||||
|  | ||||