From c919da3ba5eac5daf6b0fa6b2e1d3c9e93630ceb Mon Sep 17 00:00:00 2001 From: Andrew Stakhov Date: Fri, 20 Mar 2020 14:50:02 -0400 Subject: [PATCH 1/3] Support for informers --- .dockerignore | 6 + .gitignore | 1 + examples/httpClientFactory/Program.cs | 3 +- examples/informers/ControllerService.cs | 27 ++ .../DeltaChangesQueryingController.cs | 111 ++++++ examples/informers/IController.cs | 17 + examples/informers/Program.cs | 22 ++ .../informers/Properties/launchSettings.json | 10 + .../ServiceCollectionKubernetesExtensions.cs | 49 +++ .../informers/appsettings.Development.json | 9 + examples/informers/appsettings.json | 9 + examples/informers/informers.csproj | 20 ++ kubernetes-client.sln | 15 + .../Controllers/ResourceEventDeltaBlock.cs | 258 ++++++++++++++ src/KubernetesClient/Extensions.cs | 202 +++++++++++ src/KubernetesClient/IKubernetes.Generic.cs | 124 +++++++ .../Informers/Cache/CacheSynchronized.cs | 26 ++ .../Informers/Cache/ICache.cs | 37 ++ .../Informers/Cache/SimpleCache.cs | 199 +++++++++++ .../Cache/VersionPartitionedSharedCache.cs | 294 ++++++++++++++++ .../Informers/FaultTolerance/Extensions.cs | 27 ++ .../Informers/FaultTolerance/RetryPolicy.cs | 77 +++++ src/KubernetesClient/Informers/IInformer.cs | 57 +++ .../Informers/IKubernetesInformer.cs | 11 + .../Informers/KubernetesInformer.cs | 164 +++++++++ .../Informers/KubernetesInformerOptions.cs | 16 + .../KubernetesKeyVersionEqualityComparer.cs | 37 ++ .../Informers/Notifications/EventTypeFlags.cs | 60 ++++ .../Informers/Notifications/ResourceEvent.cs | 124 +++++++ .../Informers/ResourceObservableExtensions.cs | 326 ++++++++++++++++++ .../Informers/ResourceStreamType.cs | 24 ++ .../Informers/SharedInformer.cs | 209 +++++++++++ .../Informers/SharedKubernetesInformer.cs | 46 +++ .../Informers/SharedOptionsInformer.cs | 39 +++ src/KubernetesClient/Kubernetes.ConfigInit.cs | 12 +- src/KubernetesClient/Kubernetes.Generic.cs | 212 ++++++++++++ src/KubernetesClient/KubernetesClient.csproj | 13 +- ...esClientConfiguration.HttpClientHandler.cs | 6 +- src/KubernetesClient/KubernetesObject.cs | 9 + src/KubernetesClient/Properties/Assembly.cs | 3 + src/KubernetesClient/Watcher.cs | 25 +- .../KubernetesClient.Tests/DeltaFifoTests.cs | 135 ++++++++ .../Kubernetes.WebSockets.Tests.cs | 2 +- .../KubernetesClient.Tests.csproj | 16 +- .../KubernetesResourceInformerTests.cs | 217 ++++++++++++ .../Mock/MockKubeApiServer.cs | 80 ++++- .../Mock/MockWebSocket.cs | 6 +- .../SharedInformerTests.cs | 306 ++++++++++++++++ .../StreamDemuxerTests.cs | 2 +- tests/KubernetesClient.Tests/TestData.cs | 156 +++++++++ .../Utils/ScheduledEvent.cs | 14 + .../Utils/TestCompleteException.cs | 8 + .../Utils/TestExtensions.cs | 170 +++++++++ .../Utils/TestResource.cs | 73 ++++ .../Utils/XunitLogger.cs | 37 ++ .../VersionPartitionedSharedCacheTests.cs | 161 +++++++++ tests/KubernetesClient.Tests/WatchTests.cs | 17 +- 57 files changed, 4302 insertions(+), 34 deletions(-) create mode 100644 .dockerignore create mode 100644 examples/informers/ControllerService.cs create mode 100644 examples/informers/DeltaChangesQueryingController.cs create mode 100644 examples/informers/IController.cs create mode 100644 examples/informers/Program.cs create mode 100644 examples/informers/Properties/launchSettings.json create mode 100644 examples/informers/ServiceCollectionKubernetesExtensions.cs create mode 100644 examples/informers/appsettings.Development.json create mode 100644 examples/informers/appsettings.json create mode 100644 examples/informers/informers.csproj create mode 
100644 src/KubernetesClient/Controllers/ResourceEventDeltaBlock.cs create mode 100644 src/KubernetesClient/Extensions.cs create mode 100644 src/KubernetesClient/IKubernetes.Generic.cs create mode 100644 src/KubernetesClient/Informers/Cache/CacheSynchronized.cs create mode 100644 src/KubernetesClient/Informers/Cache/ICache.cs create mode 100644 src/KubernetesClient/Informers/Cache/SimpleCache.cs create mode 100644 src/KubernetesClient/Informers/Cache/VersionPartitionedSharedCache.cs create mode 100644 src/KubernetesClient/Informers/FaultTolerance/Extensions.cs create mode 100644 src/KubernetesClient/Informers/FaultTolerance/RetryPolicy.cs create mode 100644 src/KubernetesClient/Informers/IInformer.cs create mode 100644 src/KubernetesClient/Informers/IKubernetesInformer.cs create mode 100644 src/KubernetesClient/Informers/KubernetesInformer.cs create mode 100644 src/KubernetesClient/Informers/KubernetesInformerOptions.cs create mode 100644 src/KubernetesClient/Informers/KubernetesKeyVersionEqualityComparer.cs create mode 100644 src/KubernetesClient/Informers/Notifications/EventTypeFlags.cs create mode 100644 src/KubernetesClient/Informers/Notifications/ResourceEvent.cs create mode 100644 src/KubernetesClient/Informers/ResourceObservableExtensions.cs create mode 100644 src/KubernetesClient/Informers/ResourceStreamType.cs create mode 100644 src/KubernetesClient/Informers/SharedInformer.cs create mode 100644 src/KubernetesClient/Informers/SharedKubernetesInformer.cs create mode 100644 src/KubernetesClient/Informers/SharedOptionsInformer.cs create mode 100644 src/KubernetesClient/Kubernetes.Generic.cs create mode 100644 src/KubernetesClient/Properties/Assembly.cs create mode 100644 tests/KubernetesClient.Tests/DeltaFifoTests.cs create mode 100644 tests/KubernetesClient.Tests/KubernetesResourceInformerTests.cs create mode 100644 tests/KubernetesClient.Tests/SharedInformerTests.cs create mode 100644 tests/KubernetesClient.Tests/TestData.cs create mode 100644 tests/KubernetesClient.Tests/Utils/ScheduledEvent.cs create mode 100644 tests/KubernetesClient.Tests/Utils/TestCompleteException.cs create mode 100644 tests/KubernetesClient.Tests/Utils/TestExtensions.cs create mode 100644 tests/KubernetesClient.Tests/Utils/TestResource.cs create mode 100644 tests/KubernetesClient.Tests/Utils/XunitLogger.cs create mode 100644 tests/KubernetesClient.Tests/VersionPartitionedSharedCacheTests.cs diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..21d4f655c --- /dev/null +++ b/.dockerignore @@ -0,0 +1,6 @@ +**/bin/ +**/obj/ +**/out/ +**/layer/ +**Dockerfile* +*/*.md \ No newline at end of file diff --git a/.gitignore b/.gitignore index 8244919f8..1dfdd1f98 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,4 @@ bin/ # JetBrains Rider .idea/ *.sln.iml +examples/informers/Dockerfile diff --git a/examples/httpClientFactory/Program.cs b/examples/httpClientFactory/Program.cs index b3a2d92ca..db34e6bac 100644 --- a/examples/httpClientFactory/Program.cs +++ b/examples/httpClientFactory/Program.cs @@ -32,7 +32,8 @@ public static async Task Main(string[] args) serviceProvider.GetRequiredService(), httpClient); }) - .ConfigurePrimaryHttpMessageHandler(config.CreateDefaultHttpClientHandler); + .ConfigurePrimaryHttpMessageHandler(config.CreateDefaultHttpClientHandler) + .AddHttpMessageHandler(KubernetesClientConfiguration.CreateWatchHandler); // Add the class that uses the client services.AddHostedService(); diff --git a/examples/informers/ControllerService.cs 
b/examples/informers/ControllerService.cs new file mode 100644 index 000000000..d3c4710dd --- /dev/null +++ b/examples/informers/ControllerService.cs @@ -0,0 +1,27 @@ +using System; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; + +namespace informers +{ + public class ControllerService : BackgroundService + { + private readonly IServiceProvider _serviceProvider; + + public ControllerService(IServiceProvider serviceProvider) + { + _serviceProvider = serviceProvider; + } + + protected override async Task ExecuteAsync(CancellationToken stoppingToken) + { + var controllers = _serviceProvider.GetServices(); + await Task.WhenAll(controllers.Select(x => x.Initialize(stoppingToken))); + } + + + } +} \ No newline at end of file diff --git a/examples/informers/DeltaChangesQueryingController.cs b/examples/informers/DeltaChangesQueryingController.cs new file mode 100644 index 000000000..533204e51 --- /dev/null +++ b/examples/informers/DeltaChangesQueryingController.cs @@ -0,0 +1,111 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Reactive.Disposables; +using System.Reactive.Linq; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using System.Threading.Tasks.Dataflow; +using k8s; +using k8s.Informers; +using k8s.Informers.Notifications; +using k8s.Models; +using KellermanSoftware.CompareNetObjects; +using Microsoft.Extensions.Logging; +using Newtonsoft.Json; +using Newtonsoft.Json.Converters; + +namespace informers +{ + // this sample demos both informer and controller + // there are two loggers: + // _informerLogger lets you see raw data coming out of informer stream + // _reconcilerLogger lets you see batches of object transitions that object went through since last time we did work on it + // reconciler is purposely slowed down to show accumulation of events between worker actions + + // try creating and deleting some pods in "default" namespace and watch the output + + public class DeltaChangesQueryingController : IController + { + private readonly IKubernetesInformer _podInformer; + private readonly ILogger _reconcilerLogger; + private ILogger _informerLogger; + CompareLogic _objectCompare = new CompareLogic(); + // private readonly ActionBlock>> _reconciler; + private readonly CompositeDisposable _subscription = new CompositeDisposable(); + + public DeltaChangesQueryingController(IKubernetesInformer podInformer, ILoggerFactory loggerFactory) + { + _podInformer = podInformer; + _reconcilerLogger = loggerFactory.CreateLogger("Reconciler"); + _informerLogger = loggerFactory.CreateLogger("Informer"); + _objectCompare.Config.MaxDifferences = 100; + // the commented sections show how to use advanced syntax to work with TPL dataflows. 
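For orientation, the simplified path this sample refers to boils down to roughly the sketch below. It is illustrative only, not part of the patch: it assumes the IKubernetesInformer<V1Pod>, ToResourceEventDeltaBlock, ProcessWith and DisposeWith members introduced elsewhere in this change (namespaces k8s, k8s.Informers, k8s.Informers.Notifications, System.Reactive.Disposables), plus a caller-supplied HandleChangesAsync handler and logger that are hypothetical.

    // Hedged sketch of the simplified ProcessWith pipeline (podInformer, logger and
    // HandleChangesAsync are assumptions, not part of this patch).
    var subscriptions = new CompositeDisposable();
    var deltaQueue = podInformer                           // assumed IKubernetesInformer<V1Pod>
        .GetResource(ResourceStreamType.ListWatch, new KubernetesInformerOptions { Namespace = "default" })
        .ToResourceEventDeltaBlock(pod => pod.Metadata.Name, out var informerSubscription);
    informerSubscription.DisposeWith(subscriptions);
    deltaQueue
        .ProcessWith(HandleChangesAsync, logger)           // HandleChangesAsync: List<ResourceEvent<V1Pod>> -> Task
        .DisposeWith(subscriptions);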
most scenarios can get away with .ProcessWith helper method + + // _reconciler = new ActionBlock>>(Reconcile, + // new ExecutionDataflowBlockOptions() + // { + // BoundedCapacity = 2, + // MaxDegreeOfParallelism = 2, + // }); + } + + // public Task Completion => _reconciler.Completion; + + public Task Initialize(CancellationToken cancellationToken) + { + var workerQueue = _podInformer + .GetResource(ResourceStreamType.ListWatch, new KubernetesInformerOptions() { Namespace = "default"}) + // .Resync(TimeSpan.FromSeconds(5)) + .Do(item => + { + _informerLogger.LogInformation($"\n EventType: {item.EventFlags} \n Name: {item.Value.Metadata.Name} \n Version: {item.Value.Metadata.ResourceVersion}"); + }) + .Catch,Exception>(e => + { + _informerLogger.LogCritical(e, e.Message); + return Observable.Throw>(e); + }) + .ToResourceEventDeltaBlock(x => x.Metadata.Name, out var informerSubscription); + informerSubscription.DisposeWith(_subscription); + workerQueue + //.LinkTo(_reconciler) // working with action blocks directly for fine grained control + .ProcessWith(Reconcile, _reconcilerLogger) // simplified syntax + .DisposeWith(_subscription); + return Task.CompletedTask; + } + + private async Task Reconcile(List> changes) + { + // invoke reconcilation here + + var obj = changes.First().Value; + var sb = new StringBuilder(); + // sb.AppendLine($"Received changes for object with ID {KubernetesObject.KeySelector(obj)} with {changes.Count} items"); + sb.AppendLine($"Received changes for object with ID {KubernetesObject.KeySelector(obj)} with {changes.Count} items"); + sb.AppendLine($"Last known state was {changes.Last().EventFlags}"); + foreach (var item in changes) + { + sb.AppendLine($"==={item.EventFlags}==="); + sb.AppendLine($"Name: {item.Value.Metadata.Name}"); + sb.AppendLine($"Version: {item.Value.Metadata.ResourceVersion}"); + if (item.EventFlags.HasFlag(EventTypeFlags.Modify)) + { + var updateDelta = _objectCompare.Compare(item.OldValue, item.Value); + foreach (var difference in updateDelta.Differences) + { + sb.AppendLine($"{difference.PropertyName}: {difference.Object1} -> {difference.Object2}"); + } + + } + // sb.AppendLine(JsonConvert.SerializeObject(item, Formatting.Indented, new StringEnumConverter())); + } + _reconcilerLogger.LogInformation(sb.ToString()); + + await Task.Delay(TimeSpan.FromSeconds(10)); // simulate + await Task.CompletedTask; + + } + } +} diff --git a/examples/informers/IController.cs b/examples/informers/IController.cs new file mode 100644 index 000000000..049c34be2 --- /dev/null +++ b/examples/informers/IController.cs @@ -0,0 +1,17 @@ +using System.Threading; +using System.Threading.Tasks; + +namespace informers +{ + /// + /// Base interface for implementing controllers + /// + public interface IController + { + /// + /// Signals that controller is done processing all the work and no more work will ever be processed. 
+ /// Mainly useful in testing + /// + public Task Initialize(CancellationToken cancellationToken); + } +} \ No newline at end of file diff --git a/examples/informers/Program.cs b/examples/informers/Program.cs new file mode 100644 index 000000000..bf5f3dcee --- /dev/null +++ b/examples/informers/Program.cs @@ -0,0 +1,22 @@ +using Microsoft.Extensions.Hosting; +using Microsoft.Extensions.Logging; + +namespace informers +{ + public class Program + { + public static void Main(string[] args) + { + CreateHostBuilder(args).Build().Run(); + } + + public static IHostBuilder CreateHostBuilder(string[] args) => + Host.CreateDefaultBuilder(args) + .ConfigureLogging(x => x.AddConsole()) + .ConfigureServices((hostContext, services) => + { + services.AddKubernetes(); + }); + + } +} diff --git a/examples/informers/Properties/launchSettings.json b/examples/informers/Properties/launchSettings.json new file mode 100644 index 000000000..9631e8a81 --- /dev/null +++ b/examples/informers/Properties/launchSettings.json @@ -0,0 +1,10 @@ +{ + "profiles": { + "K8SControllerExample": { + "commandName": "Project", + "environmentVariables": { + "DOTNET_ENVIRONMENT": "Development" + } + } + } +} diff --git a/examples/informers/ServiceCollectionKubernetesExtensions.cs b/examples/informers/ServiceCollectionKubernetesExtensions.cs new file mode 100644 index 000000000..c46cbcd31 --- /dev/null +++ b/examples/informers/ServiceCollectionKubernetesExtensions.cs @@ -0,0 +1,49 @@ +using System; +using System.Linq; +using k8s; +using k8s.Informers; +using k8s.Informers.Cache; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Rest; +using Microsoft.Rest.TransientFaultHandling; + +namespace informers +{ + public static class Extensions + { + public static IServiceCollection AddKubernetes(this IServiceCollection services) + { + services.AddHostedService(); + var controllers = AppDomain.CurrentDomain.GetAssemblies() + .SelectMany(x => x.DefinedTypes) + .Where(x => x.IsClass && !x.IsAbstract && typeof(IController).IsAssignableFrom(x)) + .ToList(); + foreach (var controller in controllers) + { + services.AddSingleton(typeof(IController), controller); + } + var config = KubernetesClientConfiguration.BuildDefaultConfig(); + + services.AddSingleton(config); + services.AddSingleton(); + services.AddHttpClient("DefaultName") + .AddTypedClient((httpClient, serviceProvider) => + new Kubernetes( + serviceProvider.GetRequiredService(), + httpClient)) + .AddHttpMessageHandler(() => new RetryDelegatingHandler() + { + RetryPolicy = new RetryPolicy(new ExponentialBackoffRetryStrategy()) + }) + .AddHttpMessageHandler(KubernetesClientConfiguration.CreateWatchHandler) + .ConfigurePrimaryHttpMessageHandler(config.CreateDefaultHttpClientHandler); + var kubernetesResources = AppDomain.CurrentDomain.GetAssemblies() + .SelectMany(x => x.DefinedTypes) + .Where(x => typeof(IKubernetesObject).IsAssignableFrom(x)) + .ToList(); + services.AddTransient(typeof(KubernetesInformer<>)); + services.AddSingleton(typeof(IKubernetesInformer<>), typeof(SharedKubernetesInformer<>)); + return services; + } + } +} diff --git a/examples/informers/appsettings.Development.json b/examples/informers/appsettings.Development.json new file mode 100644 index 000000000..8983e0fc1 --- /dev/null +++ b/examples/informers/appsettings.Development.json @@ -0,0 +1,9 @@ +{ + "Logging": { + "LogLevel": { + "Default": "Information", + "Microsoft": "Warning", + "Microsoft.Hosting.Lifetime": "Information" + } + } +} diff --git a/examples/informers/appsettings.json 
b/examples/informers/appsettings.json new file mode 100644 index 000000000..8983e0fc1 --- /dev/null +++ b/examples/informers/appsettings.json @@ -0,0 +1,9 @@ +{ + "Logging": { + "LogLevel": { + "Default": "Information", + "Microsoft": "Warning", + "Microsoft.Hosting.Lifetime": "Information" + } + } +} diff --git a/examples/informers/informers.csproj b/examples/informers/informers.csproj new file mode 100644 index 000000000..e79261a43 --- /dev/null +++ b/examples/informers/informers.csproj @@ -0,0 +1,20 @@ + + + + netcoreapp3.1 + true + ..\..\src\KubernetesClient\kubernetes-client.snk + + + + + + + + + + + + + + diff --git a/kubernetes-client.sln b/kubernetes-client.sln index 7627fbd58..46ac8a4c9 100644 --- a/kubernetes-client.sln +++ b/kubernetes-client.sln @@ -35,6 +35,8 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "patch", "examples\patch\pat EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "httpClientFactory", "examples\httpClientFactory\httpClientFactory.csproj", "{A07314A0-02E8-4F36-B233-726D59D28F08}" EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "informers", "examples\informers\informers.csproj", "{A24E81F3-EFB1-4AFE-8C87-BDF2D3A0C0C2}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -189,6 +191,18 @@ Global {A07314A0-02E8-4F36-B233-726D59D28F08}.Release|x64.Build.0 = Release|Any CPU {A07314A0-02E8-4F36-B233-726D59D28F08}.Release|x86.ActiveCfg = Release|Any CPU {A07314A0-02E8-4F36-B233-726D59D28F08}.Release|x86.Build.0 = Release|Any CPU + {A24E81F3-EFB1-4AFE-8C87-BDF2D3A0C0C2}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {A24E81F3-EFB1-4AFE-8C87-BDF2D3A0C0C2}.Debug|Any CPU.Build.0 = Debug|Any CPU + {A24E81F3-EFB1-4AFE-8C87-BDF2D3A0C0C2}.Debug|x64.ActiveCfg = Debug|Any CPU + {A24E81F3-EFB1-4AFE-8C87-BDF2D3A0C0C2}.Debug|x64.Build.0 = Debug|Any CPU + {A24E81F3-EFB1-4AFE-8C87-BDF2D3A0C0C2}.Debug|x86.ActiveCfg = Debug|Any CPU + {A24E81F3-EFB1-4AFE-8C87-BDF2D3A0C0C2}.Debug|x86.Build.0 = Debug|Any CPU + {A24E81F3-EFB1-4AFE-8C87-BDF2D3A0C0C2}.Release|Any CPU.ActiveCfg = Release|Any CPU + {A24E81F3-EFB1-4AFE-8C87-BDF2D3A0C0C2}.Release|Any CPU.Build.0 = Release|Any CPU + {A24E81F3-EFB1-4AFE-8C87-BDF2D3A0C0C2}.Release|x64.ActiveCfg = Release|Any CPU + {A24E81F3-EFB1-4AFE-8C87-BDF2D3A0C0C2}.Release|x64.Build.0 = Release|Any CPU + {A24E81F3-EFB1-4AFE-8C87-BDF2D3A0C0C2}.Release|x86.ActiveCfg = Release|Any CPU + {A24E81F3-EFB1-4AFE-8C87-BDF2D3A0C0C2}.Release|x86.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE @@ -206,6 +220,7 @@ Global {542DC30E-FDF7-4A35-B026-6C21F435E8B1} = {879F8787-C3BB-43F3-A92D-6D4C7D3A5285} {04DE2C84-117D-4E21-8B45-B7AE627697BD} = {B70AFB57-57C9-46DC-84BE-11B7DDD34B40} {A07314A0-02E8-4F36-B233-726D59D28F08} = {B70AFB57-57C9-46DC-84BE-11B7DDD34B40} + {A24E81F3-EFB1-4AFE-8C87-BDF2D3A0C0C2} = {B70AFB57-57C9-46DC-84BE-11B7DDD34B40} EndGlobalSection GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {049A763A-C891-4E8D-80CF-89DD3E22ADC7} diff --git a/src/KubernetesClient/Controllers/ResourceEventDeltaBlock.cs b/src/KubernetesClient/Controllers/ResourceEventDeltaBlock.cs new file mode 100644 index 000000000..6f32e006a --- /dev/null +++ b/src/KubernetesClient/Controllers/ResourceEventDeltaBlock.cs @@ -0,0 +1,258 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Reactive.Disposables; +using System.Threading.Tasks; +using System.Threading.Tasks.Dataflow; +using 
k8s.Informers.Notifications; +using Microsoft.Extensions.Logging; + +namespace k8s.Controllers +{ + + /// + /// A TPL Dataflow block exposes queuing semantics that attaches to infromer observable streams. + /// It groups delta changes per key, sending each group to downstream blocks as individual messages + /// This allows "Per resource ID" processing semantics + /// Any sync events are only propagated if no other changes are queued up + /// + /// The type of key used to identity resource + /// The resource type + public class ResourceEventDeltaBlock : ISourceBlock>>, ITargetBlock> + { + private readonly Func _keyFunc; + private readonly bool _skipTransient; + private readonly Queue>> _queue = new Queue>>(); + private readonly List _targets = new List(); + private bool _isCompleting; + private long _msgId; + private readonly object _lock = new object(); + private readonly TaskCompletionSource _taskCompletionSource = new TaskCompletionSource(); + private readonly Dictionary>> _items = new Dictionary>>(); + + /// The key selector function + /// If , removes any batches in which resource was created (first msg=Add) and removed (last msg=Delete) + public ResourceEventDeltaBlock(Func keyFunc, bool skipTransient = true) + { + _keyFunc = keyFunc; + _skipTransient = skipTransient; + } + + private TKey KeyOf(ResourceEvent obj) => KeyOf(obj.Value); + + private TKey KeyOf(TResource obj) => _keyFunc(obj); + + + + + /// + /// Checks if block is marked for completion and marks itself as completed after queue is drained + /// + private void SetCompletedIfNeeded() + { + lock (_lock) + { + if (!_isCompleting || _queue.Count != 0) return; + foreach (var link in _targets.Where(x => x.LinkOptions.PropagateCompletion)) + { + link.Target.Complete(); + } + } + + _taskCompletionSource.TrySetResult(null); + } + public DataflowMessageStatus OfferMessage(DataflowMessageHeader messageHeader, ResourceEvent resourceEvent, ISourceBlock> source, + bool consumeToAccept) + { + if (_isCompleting) + return DataflowMessageStatus.DecliningPermanently; + if (consumeToAccept) + { + resourceEvent = source.ConsumeMessage(messageHeader, this, out var consumed); + if (!consumed) + return DataflowMessageStatus.NotAvailable; + } + + if (resourceEvent.EventFlags.HasFlag(EventTypeFlags.ResetEmpty) + || resourceEvent.Value == null) + return DataflowMessageStatus.Declined; + + + lock (_lock) + { + // decline any syncs if we're already queued up to process this resource + if(resourceEvent.EventFlags.HasFlag(EventTypeFlags.Sync) && _items.ContainsKey(_keyFunc(resourceEvent.Value))) + { + return DataflowMessageStatus.Declined; + } + QueueActionLocked(resourceEvent); + } + + return DataflowMessageStatus.Accepted; + } + + + private void CombineDeltas(List> deltas) + { + if (deltas.Count < 2) return; + if (deltas.First().EventFlags.HasFlag(EventTypeFlags.Sync)) // if we had a sync item queued up and got something else, get rid of sync + deltas.RemoveAt(0); + if (deltas.Count < 2) return; + + // if the entire object was created and removed before worker got a chance to touch it and worker has not chose to see these + // types of events, we can just get rid of this "transient" object and not even notify worker of its existence + if(_skipTransient && deltas[0].EventFlags.HasFlag(EventTypeFlags.Add) && deltas[deltas.Count - 1].EventFlags.HasFlag(EventTypeFlags.Delete)) + deltas.Clear(); + } + + private void QueueActionLocked(ResourceEvent obj) + { + var id = KeyOf(obj); + + var exists = _items.TryGetValue(id, out var deltas); + if (!exists) + { + 
deltas = new List>(); + _items[id] = deltas; + _queue.Enqueue(deltas); + } + + deltas.Add(obj); + CombineDeltas(deltas); + if (_queue.Count == 1) // we've just added to empty queue, kick off processing + OfferMessagesToLinks(); + + } + + public void Complete() + { + _isCompleting = true; + SetCompletedIfNeeded(); + } + + public void Fault(Exception exception) + { + _taskCompletionSource.SetException(exception); + foreach (var link in _targets.Where(x => x.LinkOptions.PropagateCompletion)) + { + link.Target.Fault(exception); + } + } + + public Task Completion => _taskCompletionSource.Task; + + public List> ConsumeMessage(DataflowMessageHeader messageHeader, ITargetBlock>> target, out bool messageConsumed) + { + lock (_lock) + { + var link = _targets.FirstOrDefault(x => x.Target == target); + if (link != null) + { + // doesn't matter what they told us before, they are potentially ready to receive more messages + link.LastOfferedMessageReply = DataflowMessageStatus.NotAvailable; + } + + while (true) + { + if (!_queue.TryDequeue(out var deltas)) // queue is empty, nothing left to do + { + messageConsumed = false; + return null; + } + // this can happen if the entire lifecycle of the object started and ended before worker even touched it + if(!deltas.Any()) + continue; + var id = KeyOf(deltas.First()); + try + { + // some condition caused this queued item to be expired, go to next one + if (!_items.Remove(id, out var item)) + { + continue; + } + + messageConsumed = true; + if(link != null) // only offer more messages if it's an actual linked block and not someone just asking to send em messages + Task.Run(() => OfferMessageToLink(link)); // avoid stack recursion + return item; + } + finally + { + SetCompletedIfNeeded(); + } + } + } + } + + private void OfferMessagesToLinks() + { + lock (_lock) + { + foreach (var link in _targets.ToList().Where(x => x.LastOfferedMessageReply != DataflowMessageStatus.Postponed)) + { + do // keep feeding the link messages until queue is either empty or it tells us that it can't handle any more + { + OfferMessageToLink(link); + } while (_queue.Count > 0 && link.LastOfferedMessageReply == DataflowMessageStatus.Accepted); + } + } + } + + private void OfferMessageToLink(TargetLink link) + { + List> msg; + lock (_lock) + { + if (!_queue.TryPeek(out msg)) + { + return; // queue is empty + } + } + + var header = new DataflowMessageHeader(++_msgId); + link.LastOfferedMessageReply = link.Target.OfferMessage(header, msg, this, true); + if (link.LastOfferedMessageReply == DataflowMessageStatus.DecliningPermanently) + _targets.Remove(link); + } + + public IDisposable LinkTo(ITargetBlock>> target, DataflowLinkOptions linkOptions) + { + //todo: add support for max messages + lock (_lock) + { + var link = new TargetLink + { + Target = target, + LinkOptions = linkOptions, + LastOfferedMessageReply = DataflowMessageStatus.NotAvailable + }; + if (linkOptions.Append) + _targets.Add(link); + else + _targets.Insert(0, link); + + OfferMessageToLink(link); + return Disposable.Create(() => _targets.Remove(link)); + } + } + + public void ReleaseReservation(DataflowMessageHeader messageHeader, ITargetBlock>> target) + { + // don't support reservations + } + + public bool ReserveMessage(DataflowMessageHeader messageHeader, ITargetBlock>> target) + { + return false; + } + + + class TargetLink + { + internal ITargetBlock>> Target; + internal DataflowLinkOptions LinkOptions; + internal DataflowMessageStatus LastOfferedMessageReply; + } + } + +} diff --git a/src/KubernetesClient/Extensions.cs 
b/src/KubernetesClient/Extensions.cs new file mode 100644 index 000000000..4ab7be798 --- /dev/null +++ b/src/KubernetesClient/Extensions.cs @@ -0,0 +1,202 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using System.Net; +using System.Net.Http; +using System.Reactive; +using System.Reactive.Concurrency; +using System.Reactive.Disposables; +using System.Reactive.Linq; +using System.Reactive.Subjects; +using System.Threading.Tasks; +using System.Threading.Tasks.Dataflow; +using k8s.Controllers; +using k8s.Informers; +using k8s.Informers.Cache; +using k8s.Informers.Notifications; +using k8s.Models; +using Microsoft.Extensions.Logging; +using Microsoft.Rest.TransientFaultHandling; +using YamlDotNet.Serialization.NodeTypeResolvers; + +namespace k8s +{ + public static class Extensions + { + /// + /// Removes an item from the dictionary + /// + /// The source dictionary + /// The key for which item should be removed + /// The value of the object that was removed, or if value was not present in dictionary + /// The type of key + /// The type of value + /// if the object was removed from dictionry, or if the specific key was not present in dictionary + public static bool Remove(this IDictionary source, TKey key, out TValue result) + { + result = default; + if (!source.TryGetValue(key, out result)) + return false; + source.Remove(key); + return true; + } + /// + /// Tries to remove item from the queue + /// + /// Source queue + /// The result if dequeue was successful, other + /// The type of items in queue + /// if dequeue was successful, otherwise + public static bool TryDequeue(this Queue queue, out T result) + { + result = default; + if (queue.Count == 0) + return false; + try + { + result = queue.Dequeue(); + } + catch (InvalidOperationException) + { + return false; + } + return true; + } + /// + /// Tries to look at the first item of the queue without removing it + /// + /// The source queue + /// The item at the top of the queue, or if queue is empty + /// The type of the items in the queue + /// if the operation was successful, or if the queue is empty + public static bool TryPeek(this Queue queue, out T result) + { + result = default; + if (queue.Count == 0) + return false; + try + { + result = queue.Peek(); + } + catch (InvalidOperationException) + { + return false; + } + + return true; + } + /// + /// Creates a for + /// + /// The source enumerable + /// The type of elements + /// The produced hashset + public static HashSet ToHashSet(this IEnumerable source) + { + return source.ToHashSet(null); + } + /// + /// Creates a for + /// + /// The source enumerable + /// The comparer to use + /// The type of elements + /// The produced hashset + public static HashSet ToHashSet( + this IEnumerable source, + IEqualityComparer comparer) + { + if (source == null) + throw new ArgumentNullException(nameof(source)); + return new HashSet(source, comparer); + } + + + /// + /// Converts the source sequence to . This transitions from + /// observable into TPL Dataflow monad. 
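A brief, self-contained illustration of the dictionary and queue helpers defined in this file (standalone sketch, not part of the patch):

    var queue = new Queue<string>();
    queue.Enqueue("pod-a");
    if (queue.TryPeek(out var head))        // inspect the head without removing it
        Console.WriteLine(head);            // "pod-a"
    if (queue.TryDequeue(out var item))     // remove and return the head, if any
        Console.WriteLine(item);            // "pod-a"
    queue.TryDequeue(out _);                // returns false: the queue is now empty

    var map = new Dictionary<string, int> { ["pod-a"] = 1 };
    if (map.Remove("pod-a", out var removed))
        Console.WriteLine(removed);         // 1, the value that was removed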
The resulting block allows queue processing semantics where each resulting item + /// is the collection of the given for grouped by + /// + /// The resource observable + /// The type of resource + /// The connected to the observable + public static ISourceBlock>> ToResourceEventDeltaBlock(this IObservable> source, + out IDisposable subscription) where TResource : IKubernetesObject, IMetadata => + source.ToResourceEventDeltaBlock(x => KubernetesObject.KeySelector, out subscription); + + /// + /// Converts the source sequence to . This transitions from + /// observable into TPL Dataflow monad. The resulting block allows queue processing semantics where each resulting item + /// is the collection of the given for grouped by resource object identified by the + /// parameter + /// + /// The resource observable + /// Key selector function that uniquely identifies objects in the resource collection being observed + /// The type of resource + /// The key for the resource + /// The connected to the observable + public static ISourceBlock>> ToResourceEventDeltaBlock(this IObservable> source, Func keySelector, out IDisposable subscription) + { + var deltaBlock = new ResourceEventDeltaBlock(keySelector); + subscription = source.Subscribe(deltaBlock.AsObserver()); + return deltaBlock; + } + + /// + /// Attaches the source to the target + /// + /// The original + /// The to attach to + /// The original disposable passed as + public static IDisposable DisposeWith(this IDisposable source, CompositeDisposable composite) + { + composite.Add(source); + return source; + } + + /// + /// Combines the source disposable with another into a single disposable + /// + /// The original + /// The to combine with + /// Composite disposable made up of and + public static IDisposable CombineWith(this IDisposable source, IDisposable other) + { + return new CompositeDisposable(source,other); + } + + public static IDisposable Subscribe(this IObservable source, IObserver observer, Action onFinished = null) + { + return source.Subscribe(observer, _ => { },x => onFinished(), onFinished); + } + + public static IDisposable Subscribe(this IObservable source, IObserver observer, Action onNext = null, Action onError = null, Action onCompleted = null) + { + onNext ??= obj => { }; + onError ??= obj => { }; + onCompleted ??= () => { }; + return source.Subscribe(x => + { + observer.OnNext(x); + onNext(x); + }, + error => + { + observer.OnError(error); + onError(error); + }, + () => + { + observer.OnCompleted(); + onCompleted(); + }); + } + + + + } + + +} diff --git a/src/KubernetesClient/IKubernetes.Generic.cs b/src/KubernetesClient/IKubernetes.Generic.cs new file mode 100644 index 000000000..237323e6a --- /dev/null +++ b/src/KubernetesClient/IKubernetes.Generic.cs @@ -0,0 +1,124 @@ +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; +using k8s.Models; +using Microsoft.Rest; + +namespace k8s +{ + public partial interface IKubernetes + { + /// + /// A generic list or watch operation + /// + /// + /// allowWatchBookmarks requests watch events with type "BOOKMARK". + /// Servers that do not implement bookmarks may ignore this flag and + /// bookmarks are sent at the server's discretion. Clients should not + /// assume bookmarks are returned at any specific interval, nor may + /// they assume the server will send any BOOKMARK event during a + /// session. If this is not a watch, this field is ignored. If the + /// feature gate WatchBookmarks is not enabled in apiserver, this field + /// is ignored. 
+ /// + /// This field is beta. + /// + /// + /// The continue option should be set when retrieving more results from + /// the server. Since this value is server defined, clients may only + /// use the continue value from a previous query result with identical + /// query parameters (except for the value of continue) and the server + /// may reject a continue value it does not recognize. If the specified + /// continue value is no longer valid whether due to expiration + /// (generally five to fifteen minutes) or a configuration change on + /// the server, the server will respond with a 410 ResourceExpired + /// error together with a continue token. If the client needs a + /// consistent list, it must restart their list without the continue + /// field. Otherwise, the client may send another list request with the + /// token received with the 410 error, the server will respond with a + /// list starting from the next key, but from the latest snapshot, + /// which is inconsistent from the previous list results - objects that + /// are created, modified, or deleted after the first list request will + /// be included in the response, as long as their keys are after the + /// "next key". + /// + /// This field is not supported when watch is true. Clients may start a + /// watch from the last resourceVersion value returned by the server + /// and not miss any modifications. + /// + /// + /// A selector to restrict the list of returned objects by their + /// fields. Defaults to everything. + /// + /// + /// A selector to restrict the list of returned objects by their + /// labels. Defaults to everything. + /// + /// + /// limit is a maximum number of responses to return for a list call. + /// If more items exist, the server will set the `continue` field on + /// the list metadata to a value that can be used with the same initial + /// query to retrieve the next set of results. Setting a limit may + /// return fewer than the requested amount of items (up to zero items) + /// in the event all requested objects are filtered out and clients + /// should only use the presence of the continue field to determine + /// whether more results are available. Servers may choose not to + /// support the limit argument and will return all of the available + /// results. If limit is specified and the continue field is empty, + /// clients may assume that no more results are available. This field + /// is not supported if watch is true. + /// + /// The server guarantees that the objects returned when using continue + /// will be identical to issuing a single list call without a limit - + /// that is, no objects created, modified, or deleted after the first + /// request is issued will be included in any subsequent continued + /// requests. This is sometimes referred to as a consistent snapshot, + /// and ensures that a client that is using limit to receive smaller + /// chunks of a very large result can ensure they see all possible + /// objects. If objects are updated during a chunked list the version + /// of the object that was present at the time the first list result + /// was calculated is returned. + /// + /// + /// When specified with a watch call, shows changes that occur after + /// that particular version of a resource. Defaults to changes from the + /// beginning of history. 
When specified for list: - if unset, then the + /// result is returned from remote storage based on quorum-read flag; - + /// if it's 0, then we simply return what we currently have in cache, + /// no guarantee; - if set to non zero, then the result is at least as + /// fresh as given rv. + /// + /// + /// Timeout for the list/watch call. This limits the duration of the + /// call, regardless of any activity or inactivity. + /// + /// + /// Watch for changes to the described resources and return them as a + /// stream of add, update, and remove notifications. Specify + /// resourceVersion. + /// + /// + /// If 'true', then the output is pretty printed. + /// + /// + /// The headers that will be added to request. + /// + /// + /// The cancellation token. + /// + Task>> ListWithHttpMessagesAsync( + string namespaceParameter = default(string), + bool? allowWatchBookmarks = default(bool?), + string continueParameter = default(string), + string fieldSelector = default(string), + string labelSelector = default(string), + int? limit = default(int?), + string resourceVersion = default(string), + int? timeoutSeconds = default(int?), + bool? watch = default(bool?), + string pretty = default(string), + Dictionary> customHeaders = null, + CancellationToken cancellationToken = default(CancellationToken)) where T : IKubernetesObject; + + } +} diff --git a/src/KubernetesClient/Informers/Cache/CacheSynchronized.cs b/src/KubernetesClient/Informers/Cache/CacheSynchronized.cs new file mode 100644 index 000000000..b805d6bbe --- /dev/null +++ b/src/KubernetesClient/Informers/Cache/CacheSynchronized.cs @@ -0,0 +1,26 @@ +namespace k8s.Informers.Cache +{ + public struct CacheSynchronized + { + public CacheSynchronized(long messageNumber, long cacheVersion, T value) + { + MessageNumber = messageNumber; + CacheVersion = cacheVersion; + Value = value; + } + + /// + /// Message number in the sequencer + /// + public long MessageNumber { get; } + /// + /// The version of cache this message was included in + /// + public long CacheVersion { get; } + public T Value { get; } + public override string ToString() + { + return $"MessageNumber: {MessageNumber}, IncludedInCache: {CacheVersion}: {Value}"; + } + } +} diff --git a/src/KubernetesClient/Informers/Cache/ICache.cs b/src/KubernetesClient/Informers/Cache/ICache.cs new file mode 100644 index 000000000..66d08b8fe --- /dev/null +++ b/src/KubernetesClient/Informers/Cache/ICache.cs @@ -0,0 +1,37 @@ +using System; +using System.Collections; +using System.Collections.Generic; + +namespace k8s.Informers.Cache +{ + /// + /// Maintains cache of objects of type . 
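As a usage sketch for the generic list operation declared above (hedged: it assumes a configured IKubernetes client and that the response body exposes Items and Metadata the same way the informer code in this patch consumes them):

    // One-shot list of pods in "default", server-side filtered by label.
    var config = KubernetesClientConfiguration.BuildDefaultConfig();
    IKubernetes client = new Kubernetes(config);
    var response = await client.ListWithHttpMessagesAsync<V1Pod>(
        namespaceParameter: "default",
        labelSelector: "app=nginx",
        limit: 50);
    Console.WriteLine($"listed at resourceVersion {response.Body.Metadata.ResourceVersion}");
    foreach (var pod in response.Body.Items)
        Console.WriteLine(pod.Metadata.Name);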
+ /// + /// The type of key + /// The type of resource + public interface ICache : IDictionary, IDisposable + { + /// + /// Current version of cache + /// + long Version { get; set; } + /// + /// Replace all values in cache with new values + /// + /// + void Reset(IDictionary newValues); + /// Takes a snapshot of the current cache that is version locked + /// + /// Copy of current cache locked to the version at the time cache is snapshot is taken + ICacheSnapshot Snapshot(); + } + + // A readonly snapshot of cache at a point in time + public interface ICacheSnapshot : IReadOnlyDictionary + { + /// + /// Current version of cache + /// + long Version { get; } + } +} diff --git a/src/KubernetesClient/Informers/Cache/SimpleCache.cs b/src/KubernetesClient/Informers/Cache/SimpleCache.cs new file mode 100644 index 000000000..d7451771e --- /dev/null +++ b/src/KubernetesClient/Informers/Cache/SimpleCache.cs @@ -0,0 +1,199 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; + +namespace k8s.Informers.Cache +{ + public class SimpleCache : ICache, ICacheSnapshot + { + private readonly IDictionary _items; + + public SimpleCache() + { + _items = new Dictionary(); + } + + public SimpleCache(IDictionary items, long version) + { + Version = version; + _items = new Dictionary(items); + } + + public void Reset(IDictionary newValues) + { + lock (SyncRoot) + { + _items.Clear(); + foreach (var item in newValues) + { + _items.Add(item.Key, item.Value); + } + + } + } + + public object SyncRoot { get; } = new object(); + public ICacheSnapshot Snapshot() + { + lock (SyncRoot) + { + return new SimpleCache(this, Version); + } + } + + public IEnumerator> GetEnumerator() + { + lock (SyncRoot) + { + return _items.ToList().GetEnumerator(); + } + } + + IEnumerator IEnumerable.GetEnumerator() + { + lock (SyncRoot) + { + return _items.ToList().GetEnumerator(); + } + } + + public void Add(KeyValuePair item) + { + lock (SyncRoot) + { + _items.Add(item); + } + } + + + public void Clear() + { + lock (SyncRoot) + { + _items.Clear(); + } + } + + public bool Contains(KeyValuePair item) + { + lock (SyncRoot) + { + return _items.Contains(item); + } + } + + public void CopyTo(KeyValuePair[] array, int arrayIndex) + { + lock (SyncRoot) + { + ((IDictionary) _items).CopyTo(array, arrayIndex); + } + } + + public bool Remove(KeyValuePair item) + { + lock (SyncRoot) + { + return _items.Remove(item.Key); + } + } + + public int Count + { + get + { + lock (SyncRoot) + { + return _items.Count; + } + } + } + + public bool IsReadOnly => false; + + public void Add(TKey key, TResource value) + { + lock (SyncRoot) + { + _items.Add(key, value); + } + } + + public bool ContainsKey(TKey key) + { + lock (SyncRoot) + { + return _items.ContainsKey(key); + } + } + + public bool Remove(TKey key) + { + lock (SyncRoot) + { + if (!_items.Remove(key, out var existing)) + return false; + return true; + } + } + + public bool TryGetValue(TKey key, out TResource value) + { + lock (SyncRoot) + { + return _items.TryGetValue(key, out value); + } + } + + public TResource this[TKey key] + { + get + { + lock (SyncRoot) + { + return _items[key]; + } + } + set + { + lock (SyncRoot) + { + _items[key] = value; + } + } + } + + IEnumerable IReadOnlyDictionary.Keys => Keys; + + IEnumerable IReadOnlyDictionary.Values => Values; + + public ICollection Keys + { + get + { + lock (SyncRoot) + { + return _items.Keys.ToList(); + } + } + } + + public ICollection Values + { + get + { + lock (SyncRoot) + { + return _items.Values.ToList(); 
+ } + } + } + + public void Dispose() + { + } + + public long Version { get; set; } //= 1; + } +} diff --git a/src/KubernetesClient/Informers/Cache/VersionPartitionedSharedCache.cs b/src/KubernetesClient/Informers/Cache/VersionPartitionedSharedCache.cs new file mode 100644 index 000000000..bf7dadbfd --- /dev/null +++ b/src/KubernetesClient/Informers/Cache/VersionPartitionedSharedCache.cs @@ -0,0 +1,294 @@ +using System; +using System.Collections; +using System.Collections.Concurrent; +using System.Collections.Generic; +using System.Linq; + +namespace k8s.Informers.Cache +{ + /// + /// Allows creating cache partitions for objects that have versioning semantics. Each partition will maintain its own view of its tracked objects, + /// but any items with same key and version will be shared across multiple cache partitions. + /// + /// + /// The semantics of this class allows for object reuse between informers without compromising each informers ownership of its own cache. Primarily the issue + /// it solves is if multiple informers are created with different options, but the data they receive may overlap + /// (ex. overlapping labels, or informer scoped to namespace and another scoped globally). Since the master informer (actual connection to physical server) will receive + /// same notification over separate channels, we run the risk of informer desynchronization if they share the same cache. However, if each informer maintains it's own + /// cache, we may get multiple duplicate objects in memory. This allows any objects that share the same key/version to point to the same reference, while maintaining + /// integrity of each cache (dictionary). Note that unlike a regular dictionary, this does not allow updates to same key/version + /// + /// The type of the key uniquely identifying object + /// The type of resource + /// The type of version associated with object + public class VersionPartitionedSharedCache + { + private readonly Func _keySelector; + private readonly Func _versionSelector; + // internal to allow for unit testing + internal readonly Dictionary Items = new Dictionary(); + private readonly HashSet _views = new HashSet(); + private readonly object _lock = new object(); + public VersionPartitionedSharedCache(Func keySelector, Func versionSelector) + { + _keySelector = keySelector; + _versionSelector = versionSelector; + } + + /// + /// Creates a unique cache partition that may share references to objects with same key/versions with other partitions + /// + /// Partitioned cache + public ICache CreatePartition() + { + lock (_lock) + { + var view = new CacheView(this); + _views.Add(view); + return view; + } + } + + + private void Remove(TResource resource, CacheView originView) + { + var versionedKey = GetVersionKeyFor(resource); + Remove(versionedKey, originView); + } + + private void Remove(VersionResourceKey versionedKey, CacheView originView) + { + var otherViewsTrackingResource = _views + .Except(new[] {originView}) + .Any(x => x.TryGetValue(versionedKey.Key, out var resource) && _versionSelector(resource).Equals(versionedKey.Version)); + if (!otherViewsTrackingResource) + { + Items.Remove(versionedKey); + } + } + + private TResource GetOrAdd(TResource resource) + { + var key = GetVersionKeyFor(resource); + if (Items.TryGetValue(key, out var existingResource)) + return existingResource; + Items.Add(key, resource); + return resource; + } + + internal struct VersionResourceKey + { + public TKey Key; + public TVersion Version; + } + + private VersionResourceKey 
GetVersionKeyFor(TResource resource) + { + return new VersionResourceKey() {Key = _keySelector(resource), Version = _versionSelector(resource)}; + } + + private class CacheView : ICache + { + private readonly VersionPartitionedSharedCache _parent; + private readonly Dictionary _items = new Dictionary() ; + + public long Version { get; set; }// = 1; + + public CacheView(VersionPartitionedSharedCache parent) + { + _parent = parent; + } + + public void Reset(IDictionary newValues) + { + lock (_parent._lock) + { + _items.Clear(); + foreach (var item in newValues) + { + _items.Add(item.Key, item.Value); + } + } + } + + public object SyncRoot => _parent._lock; + public ICacheSnapshot Snapshot() + { + lock (_parent) + { + return new SimpleCache(this, Version); + } + } + + public IEnumerator> GetEnumerator() + { + lock (_parent._lock) + { + return _items.ToList().GetEnumerator(); + } + } + + IEnumerator IEnumerable.GetEnumerator() + { + lock (_parent._lock) + { + return _items.ToList().GetEnumerator(); + } + } + + public void Add(KeyValuePair item) + { + lock (_parent._lock) + { + AssertMatchingKeys(item.Key, item.Value); + var cacheItem = _parent.GetOrAdd(item.Value); + _items.Add(_parent._keySelector(cacheItem), cacheItem); + } + } + + private void AssertMatchingKeys(TKey key, TResource resource) + { + if(!key.Equals(_parent._keySelector(resource))) + throw new InvalidOperationException("The value of the key specified is not the same as the one inside the resource"); + } + + public void Clear() + { + lock (_parent._lock) + { + foreach (var item in _items) + { + _parent.Remove(item.Value, this); + } + + _items.Clear(); + } + } + + public bool Contains(KeyValuePair item) + { + lock (_parent._lock) + { + return _items.Contains(item); + } + } + + public void CopyTo(KeyValuePair[] array, int arrayIndex) + { + lock (_parent._lock) + { + ((IDictionary) _items).CopyTo(array, arrayIndex); + } + } + + public bool Remove(KeyValuePair item) + { + lock (_parent._lock) + { + _parent.Remove(item.Value, this); + return _items.Remove(item.Key); + } + } + + public int Count + { + get + { + lock (_parent._lock) + { + return _items.Count; + } + } + } + + public bool IsReadOnly => false; + + public void Add(TKey key, TResource value) + { + lock (_parent._lock) + { + AssertMatchingKeys(key, value); + value = _parent.GetOrAdd(value); + _items.Add(key, value); + } + } + + public bool ContainsKey(TKey key) + { + lock (_parent._lock) + { + return _items.ContainsKey(key); + } + } + + public bool Remove(TKey key) + { + lock (_parent._lock) + { + if (!_items.Remove(key, out var existing)) + return false; + _parent.Remove(existing, this); + return true; + } + } + + public bool TryGetValue(TKey key, out TResource value) + { + lock (_parent._lock) + { + return _items.TryGetValue(key, out value); + } + } + + public TResource this[TKey key] + { + get + { + lock (_parent._lock) + { + return _items[key]; + } + } + set + { + // the semantics of set here are tricky because if the value already exists, it will reuse existing + // this means that consumers should not make assumption that the object that was passed as value to set + // is the one that got added to collection, and should always do a "get" operation if they plan on modifying it + lock (_parent._lock) + { + AssertMatchingKeys(key, value); + var existing = _parent.GetOrAdd(value); + _items[key] = existing; + } + } + } + + public ICollection Keys + { + get + { + lock (_parent._lock) + { + return _items.Keys.ToList(); + } + } + } + + public ICollection Values + { + get + { + 
lock (_parent._lock) + { + return _items.Values.ToList(); + } + } + } + + public void Dispose() + { + _parent._views.Remove(this); + } + } + } +} diff --git a/src/KubernetesClient/Informers/FaultTolerance/Extensions.cs b/src/KubernetesClient/Informers/FaultTolerance/Extensions.cs new file mode 100644 index 000000000..68be9ab58 --- /dev/null +++ b/src/KubernetesClient/Informers/FaultTolerance/Extensions.cs @@ -0,0 +1,27 @@ +using System; +using System.Net; +using System.Net.Http; +using Microsoft.Rest.TransientFaultHandling; + +namespace k8s.Informers.FaultTolerance +{ + public static class Extensions + { + /// + /// Checks if the type of exception is the one that is temporary and will resolve itself over time + /// + /// Exception to check + /// Return if exception is transient, or if it's not + public static bool IsTransient(this Exception exception) + { + if (exception is HttpRequestWithStatusException statusException) + { + return statusException.StatusCode >= HttpStatusCode.ServiceUnavailable || statusException.StatusCode == HttpStatusCode.RequestTimeout; + } + + if (exception is HttpRequestException || exception is KubernetesException) + return true; + return false; + } + } +} diff --git a/src/KubernetesClient/Informers/FaultTolerance/RetryPolicy.cs b/src/KubernetesClient/Informers/FaultTolerance/RetryPolicy.cs new file mode 100644 index 000000000..3cedf2fa7 --- /dev/null +++ b/src/KubernetesClient/Informers/FaultTolerance/RetryPolicy.cs @@ -0,0 +1,77 @@ +using System; +using System.Reactive.Linq; +using System.Threading.Tasks; + +namespace k8s.Informers.FaultTolerance +{ + /// + /// Specifies retry policy to apply to a Task or Observable + /// + /// + /// This class could potentially be replaced by Polly, but currently Polly doesn't work with observables (need access to policy rules in the builder, which are internal atm). + /// + public class RetryPolicy + { + /// + /// No retry policy should be applied + /// + public static readonly RetryPolicy None = new RetryPolicy((_,__) => false, _ => TimeSpan.Zero); + /// A delegate which accepts exception being handled and retry attempt, and returns if retry should be attempted + /// A delegate that accepts retry attempt and returns delay till next retry attempt + public RetryPolicy(Func shouldRetry, Func retryDelay) + { + ShouldRetry = shouldRetry; + RetryDelay = retryDelay; + } + + internal Func ShouldRetry { get; } + internal Func RetryDelay { get; } + + /// + /// Executes a given task while applying the specified retry policy + /// + /// Delegate for the task to execute + /// Return type of the Task + /// Task result + public async Task ExecuteAsync(Func> action) + { + var retryCount = 1; + while (true) + { + try + { + return await action(); + } + catch (Exception e) + { + if (!ShouldRetry(e, retryCount)) + throw; + retryCount++; + await Task.Delay(RetryDelay(retryCount)); + } + } + } + } + + public static class RetryPolicyExtensions + { + /// + /// Catches any exceptions in observable sequence and handles them with the specified retry policy. 
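For reference, the retry policy above can wrap either a task or an observable; a hedged sketch follows (client and podInformer are assumptions standing in for an IKubernetes instance and an IKubernetesInformer<V1Pod>):

    // Retry transient failures (per IsTransient above) up to five times with exponential backoff.
    var policy = new RetryPolicy(
        shouldRetry: (exception, attempt) => attempt <= 5 && exception.IsTransient(),
        retryDelay: attempt => TimeSpan.FromSeconds(Math.Pow(2, attempt)));

    // Wrapping an arbitrary asynchronous call:
    var pods = await policy.ExecuteAsync(() => client.ListWithHttpMessagesAsync<V1Pod>("default"));

    // Protecting an observable sequence, resubscribing when the policy allows a retry:
    var resilient = podInformer
        .GetResource(ResourceStreamType.ListWatch)
        .WithRetryPolicy(policy);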
+ /// Resubscribes to the observable if the policy determines that retry should be attempted + /// + /// The source observable + /// The retry policy to apply + /// The type of the observable + /// Original observable wrapped in retry policy + public static IObservable WithRetryPolicy(this IObservable observable, RetryPolicy retryPolicy) + { + var retryCounter = 1; + return observable.Catch(exception => + { + if (!retryPolicy.ShouldRetry(exception, retryCounter)) return Observable.Throw(exception); + retryCounter++; + return observable.DelaySubscription(retryPolicy.RetryDelay(retryCounter)); + }); + } + } +} diff --git a/src/KubernetesClient/Informers/IInformer.cs b/src/KubernetesClient/Informers/IInformer.cs new file mode 100644 index 000000000..3d513bdd2 --- /dev/null +++ b/src/KubernetesClient/Informers/IInformer.cs @@ -0,0 +1,57 @@ +using System; +using k8s.Informers.Notifications; + +namespace k8s.Informers +{ + /// + /// Provides observable abstraction over collections of resource of type which support List/Watch semantics + /// + /// The type of resource + public interface IInformer + { + /// + /// Exposes an Observable stream over a resource of a particular type + /// + /// + /// Message stream semantics are as following + /// - When subscription is first established and is has flag set + /// the first batch of messages that will be sent when subscription is opened the current state of all the objects being monitored. + /// This batch is referred to as "resource list reset". + /// - Each message in reset event will be of type + /// - The boundaries of the reset event will be marked with and + /// - If there are no objects in a reset list event, and the has a flag set, + /// message with flag is used to mark the end of List operation and start of Watch + /// + /// Observable type + /// Observable stream for resources of a particular type + IObservable> GetResource(ResourceStreamType type); + + } + + /// + /// Provides observable abstraction over collections of resource of type which support List/Watch semantics, + /// and support subscriptions with type + /// + /// The type of resource + /// The type of options + public interface IInformer + { + /// + /// Exposes an Observable stream over a resource of a particular type + /// + /// + /// Message stream semantics are as following + /// - When subscription is first established and is has flag set + /// the first batch of messages that will be sent when subscription is opened the current state of all the objects being monitored. + /// This batch is referred to as "resource list reset". 
+ /// - Each message in reset event will be of type + /// - The boundaries of the reset event will be marked with and + /// - If there are no objects in a reset list event, and the has a flag set, + /// message with flag is used to mark the end of List operation and start of Watch + /// + /// Observable type + /// + /// + IObservable> GetResource(ResourceStreamType type, TOptions options); + } +} diff --git a/src/KubernetesClient/Informers/IKubernetesInformer.cs b/src/KubernetesClient/Informers/IKubernetesInformer.cs new file mode 100644 index 000000000..40672a8c8 --- /dev/null +++ b/src/KubernetesClient/Informers/IKubernetesInformer.cs @@ -0,0 +1,11 @@ +namespace k8s.Informers +{ + /// + /// An informer that serves kubernetes resources + /// + /// The type of Kubernetes resource + public interface IKubernetesInformer : IInformer, IInformer where TResource : IKubernetesObject + { + + } +} \ No newline at end of file diff --git a/src/KubernetesClient/Informers/KubernetesInformer.cs b/src/KubernetesClient/Informers/KubernetesInformer.cs new file mode 100644 index 000000000..ed5e8070b --- /dev/null +++ b/src/KubernetesClient/Informers/KubernetesInformer.cs @@ -0,0 +1,164 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using System.Net; +using System.Reactive.Linq; +using System.Threading.Tasks; +using k8s.Informers.FaultTolerance; +using k8s.Informers.Notifications; +using k8s.Models; +using Microsoft.Extensions.Logging; +using Microsoft.Rest.TransientFaultHandling; +using RetryPolicy = k8s.Informers.FaultTolerance.RetryPolicy; + +namespace k8s.Informers +{ + /// + /// An implementation of Kubernetes informer that talks to Kubernetes API Server + /// + /// The type of Kubernetes resource + public class KubernetesInformer : IKubernetesInformer where TResource : IKubernetesObject + { + private readonly IKubernetes _kubernetes; + private readonly Func _restartOnCompletion; + private readonly RetryPolicy _retryPolicy; + + public KubernetesInformer(IKubernetes kubernetes, RetryPolicy retryPolicy = null) : this(kubernetes, retryPolicy, () => true) + { + } + public KubernetesInformer(IKubernetes kubernetes, RetryPolicy retryPolicy, Func restartOnCompletion) + { + _kubernetes = kubernetes; + _restartOnCompletion = restartOnCompletion; + _retryPolicy = retryPolicy ?? 
DefaultRetryPolicy; + } + private static RetryPolicy DefaultRetryPolicy => new RetryPolicy( + (exception, retryAttempt) => exception.IsTransient(), + retryAttempt => TimeSpan.FromSeconds(Math.Min(Math.Pow(2, retryAttempt), 30))); + + public IObservable> GetResource(ResourceStreamType type) => GetResource(type, KubernetesInformerOptions.Default); + + public IObservable> GetResource(ResourceStreamType type, KubernetesInformerOptions options) + { + return new KubernetesInformerEmitter(this, options, type).GetObservable(); + } + + private class KubernetesInformerEmitter + { + private string _resourceVersion; + private readonly KubernetesInformer _parent; + private readonly KubernetesInformerOptions _options; + private readonly ResourceStreamType _type; + + public KubernetesInformerEmitter(KubernetesInformer parent, KubernetesInformerOptions options, ResourceStreamType type) + { + _parent = parent; + _options = options; + _type = type; + } + + public IObservable> GetObservable() + { + var result = Observable.Empty>(); + if (_type != ResourceStreamType.Watch) + { + result = result.Concat(List()); + } + if (_type != ResourceStreamType.List) + { + result = result.Concat(Watch()); + } + + return result; + } + + private IObservable> List() + { + return Observable.Create>(async (observer, cancellationToken) => + { + var response = await _parent._kubernetes.ListWithHttpMessagesAsync( + namespaceParameter: _options.Namespace, + resourceVersion: _resourceVersion, + //labelSelector: options.LabelsSelector, + cancellationToken: cancellationToken).ConfigureAwait(false); + if(!response.Response.IsSuccessStatusCode) + throw new HttpRequestWithStatusException("Web server replied with error code") { StatusCode = response.Response.StatusCode }; + + var listKubernetesObject = response.Body; + _resourceVersion = listKubernetesObject.Metadata.ResourceVersion; + var items = listKubernetesObject.Items ?? 
new List(); + foreach (var item in items.ToReset(_type == ResourceStreamType.ListWatch)) + { + if (cancellationToken.IsCancellationRequested) + break; + observer.OnNext(item); + } + }) + .WithRetryPolicy(_parent._retryPolicy); + } + + private IObservable> Watch() + { + return Observable.Create>(async (observer, cancellationToken) => + { + var result = await _parent._kubernetes.ListWithHttpMessagesAsync( + watch: true, + allowWatchBookmarks: true, + resourceVersion: _resourceVersion, + timeoutSeconds: int.MaxValue, + cancellationToken: cancellationToken + ).ConfigureAwait(false); + if (!result.Response.IsSuccessStatusCode) + throw new HttpRequestWithStatusException("Web server replied with error code") {StatusCode = result.Response.StatusCode}; + return Task.FromResult(result) + .Watch() + .SelectMany(x => // this is not a one to one mapping as some events cause side effects but don't propagate, so we need SelectMany + { + switch (x.Type) + { + case WatchEventType.Added: + return new[] {x.Object.ToResourceEvent(EventTypeFlags.Add)}; + case WatchEventType.Deleted: + return new[] {x.Object.ToResourceEvent(EventTypeFlags.Delete)}; + case WatchEventType.Modified: + return new[] {x.Object.ToResourceEvent(EventTypeFlags.Modify)}; + case WatchEventType.Bookmark: + if (x.Object is IMetadata status) + _resourceVersion = status.Metadata.ResourceVersion; + break; + case WatchEventType.Error: + default: + if (x.Object is V1Status error) + { + throw new KubernetesException(error); + } + + throw new KubernetesException($"Received unknown error in watch: {x.Object}"); + } + + return Enumerable.Empty>(); + }) + .Select(x => x) + // watch should never "complete" on it's own unless there's a critical exception, except in testing scenarios + .Concat(_parent._restartOnCompletion() ? Observable.Defer(Watch) : Observable.Empty>()) + .Subscribe(observer); + }) + .Catch, Exception>(exception => + { + // handle case when we tried rewatching by specifying resource version to resume after disconnect, + // but resource is too stale - should try to resubscribe from scratch + if (exception is HttpRequestWithStatusException httpException && httpException.StatusCode == HttpStatusCode.Gone && _resourceVersion != null) + { + // we tried resuming but failed, restart from scratch + _resourceVersion = null; + return GetObservable(); + } + return Observable.Throw>(exception); + }) + .WithRetryPolicy(_parent._retryPolicy); + + } + } + } +} diff --git a/src/KubernetesClient/Informers/KubernetesInformerOptions.cs b/src/KubernetesClient/Informers/KubernetesInformerOptions.cs new file mode 100644 index 000000000..d46f2d0fa --- /dev/null +++ b/src/KubernetesClient/Informers/KubernetesInformerOptions.cs @@ -0,0 +1,16 @@ +namespace k8s.Informers +{ + public class KubernetesInformerOptions // theoretically this could be done with QObservable, but parsing expression trees is too much overhead at this point + { + /// + /// The default options for kubernetes informer, without any server side filters + /// + public static KubernetesInformerOptions Default { get; } = new KubernetesInformerOptions(); + /// + /// The namespace to which observable stream should be filtered + /// + public string Namespace { get; set; } + // todo: add label selector. 
needs a proper builder as there are many permutations + + } +} diff --git a/src/KubernetesClient/Informers/KubernetesKeyVersionEqualityComparer.cs b/src/KubernetesClient/Informers/KubernetesKeyVersionEqualityComparer.cs new file mode 100644 index 000000000..4eaf3207a --- /dev/null +++ b/src/KubernetesClient/Informers/KubernetesKeyVersionEqualityComparer.cs @@ -0,0 +1,37 @@ +using System; +using System.Collections.Generic; +using k8s.Models; + +namespace k8s.Informers +{ + public class KubernetesNameVersionEqualityComparer : IEqualityComparer where T : IMetadata + { + private KubernetesNameVersionEqualityComparer() + { + } + + public static KubernetesNameVersionEqualityComparer Instance => new KubernetesNameVersionEqualityComparer(); + public bool Equals(T x, T y) + { + if (x?.Metadata?.Name == null || y?.Metadata?.Name == null || x.Metadata.ResourceVersion == null || y.Metadata.ResourceVersion == null) + return false; + return x.Metadata.Name.Equals(y.Metadata.Name) && x.Metadata.ResourceVersion.Equals(y.Metadata.ResourceVersion); + } + + public int GetHashCode(T obj) + { + if(obj == null) + throw new ArgumentNullException(nameof(obj)); + unchecked + { + if (obj.Metadata?.Name == null || obj.Metadata?.ResourceVersion == null) + return 0; + var hashCode = obj.Metadata.Name.GetHashCode(); + hashCode = (hashCode * 397) ^ obj.Metadata.ResourceVersion.GetHashCode(); + return hashCode; + } + } + } + + +} diff --git a/src/KubernetesClient/Informers/Notifications/EventTypeFlags.cs b/src/KubernetesClient/Informers/Notifications/EventTypeFlags.cs new file mode 100644 index 000000000..4afac71e1 --- /dev/null +++ b/src/KubernetesClient/Informers/Notifications/EventTypeFlags.cs @@ -0,0 +1,60 @@ +using System; + +namespace k8s.Informers.Notifications +{ + /// + /// Denotes flags that specify how the event in a resource observable stream should be interpreted. + /// Note that more than one value is usually set - use HasFlag instead of equals + /// + [Flags] + public enum EventTypeFlags + { + /// + /// A resource was added + /// + Add = 1, + /// + /// A resource was deleted + /// + Delete = 2, + /// + /// A resource was modified + /// + Modify = 4, + /// + /// The state of the resource has not changed and the intent of the message is to inform of the current state + /// + Current = 8, + /// + /// The current state of the resource is published as part of a regular synchronization interval + /// + Sync = 16, + + /// + /// The state of the resource has been reset, and all subscribers should reset their existing cache values based on the new + /// + Reset = 32, + /// + /// The start of a sequence of reset messages, usually used to mark the start of a List operation + /// + ResetStart = 64, + /// + /// The end of a sequence of reset messages, usually used to mark the end of a List operation + /// + ResetEnd = 128, + /// + /// Marks the boundary between an empty list operation and the start of watch in an observable stream + /// + ResetEmpty = 256, + + /// + /// The event was computed through discrepancy reconciliation with the server rather than an explicit event. + /// This can occur when relisting after a reconnect to the resource server, when there are items in the local cache that + /// don't match what the server returned, so there must have been updates that were missed. 
By comparing current state + /// and old state (cache), we can compute the kind of events that we missed and emit them with this flag + /// + Computed = 512, + + + } +} diff --git a/src/KubernetesClient/Informers/Notifications/ResourceEvent.cs b/src/KubernetesClient/Informers/Notifications/ResourceEvent.cs new file mode 100644 index 000000000..b371a96e2 --- /dev/null +++ b/src/KubernetesClient/Informers/Notifications/ResourceEvent.cs @@ -0,0 +1,124 @@ +using System.Collections.Generic; +using System.Diagnostics; +using System.Text; + +namespace k8s.Informers.Notifications +{ + /// + /// + /// + /// + [DebuggerStepThrough] + public struct ResourceEvent + { + public ResourceEvent(EventTypeFlags eventFlags, TResource value, TResource oldValue = default) + { + if (eventFlags.HasFlag(EventTypeFlags.ResetEmpty) || eventFlags.HasFlag(EventTypeFlags.ResetEmpty)) + eventFlags |= EventTypeFlags.ResetStart | EventTypeFlags.ResetEnd; + if (eventFlags.HasFlag(EventTypeFlags.ResetEnd) || eventFlags.HasFlag(EventTypeFlags.ResetStart)) + eventFlags |= EventTypeFlags.Reset; + if (eventFlags.HasFlag(EventTypeFlags.Reset) || eventFlags.HasFlag(EventTypeFlags.Sync)) + eventFlags |= EventTypeFlags.Current; + + Value = value; + OldValue = oldValue; + EventFlags = eventFlags; + } + + public EventTypeFlags EventFlags { get; } + + public TResource OldValue { get; } + public TResource Value { get; } + public static ResourceEvent ResetEmpty { get; } = new ResourceEvent(EventTypeFlags.ResetEmpty, default); + + public override string ToString() + { + bool includePrefix = Value != null && OldValue != null; + + var sb = new StringBuilder(); + sb.AppendLine(); + sb.Append(" "); + sb.Append(EventFlags); + sb.Append(": ["); + if (Value != null) + { + if (includePrefix) + { + sb.Append(nameof(Value)); + sb.Append("{ "); + } + sb.Append(Value); + if (includePrefix) + { + sb.Append("} "); + } + } + if (OldValue != null) + { + if (includePrefix) + { + sb.Append(nameof(OldValue)); + sb.Append("{ "); + } + sb.Append(OldValue); + if (includePrefix) + { + sb.Append("} "); + } + } + + sb.Append("]"); + return sb.ToString(); + } + } + + public static class ResourceEventExtensions + { + public static ResourceEvent ToResourceEvent(this T obj, EventTypeFlags typeFlags, T oldValue = default) + { + if (typeFlags.HasFlag(EventTypeFlags.Delete) && oldValue == null) + oldValue = obj; + return new ResourceEvent(typeFlags, obj, oldValue); + } + + /// + /// Converts a list of objects to a resource reset list event block. Every item is of type , + /// with first and last elements also having and + /// set respectively. 
If is empty and is set, + /// + /// The source enumerable + /// If the resulting will contain a single + /// with no object value and flag set + /// The type of resource + /// The resulting enumerable of reset events + public static IEnumerable> ToReset(this IEnumerable source, bool emitEmpty = false) + { + int i = 0; + using var enumerator = source.GetEnumerator(); + if (!enumerator.MoveNext()) + { + if(emitEmpty) + yield return new ResourceEvent(EventTypeFlags.ResetEmpty, default); + yield break; + } + + var current = enumerator.Current; + while (enumerator.MoveNext()) + { + if (i == 0) + yield return current.ToResourceEvent(EventTypeFlags.ResetStart); + else + yield return current.ToResourceEvent(EventTypeFlags.Reset); + current = enumerator.Current; + i++; + }; + if(i == 0) + yield return current.ToResourceEvent(EventTypeFlags.ResetStart | EventTypeFlags.ResetEnd); + else + yield return current.ToResourceEvent(EventTypeFlags.ResetEnd); + + } + } + + +} diff --git a/src/KubernetesClient/Informers/ResourceObservableExtensions.cs b/src/KubernetesClient/Informers/ResourceObservableExtensions.cs new file mode 100644 index 000000000..1d9ff83f8 --- /dev/null +++ b/src/KubernetesClient/Informers/ResourceObservableExtensions.cs @@ -0,0 +1,326 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Reactive; +using System.Reactive.Concurrency; +using System.Reactive.Disposables; +using System.Reactive.Linq; +using System.Reactive.Subjects; +using System.Threading.Tasks; +using System.Threading.Tasks.Dataflow; +using k8s.Informers.Cache; +using k8s.Informers.Notifications; +using Microsoft.Extensions.Logging; + +namespace k8s.Informers +{ + public static class ResourceObservableExtensions + { + + /// + /// Connects source block which publishes list of to action block + /// which invokes processing function specified by for each received item. + /// + /// The source action block to attach to + /// The action to invoke for each received batch of + /// Number of allowed parallel invocations of . 
Default is 1 + /// The resource type + /// The disposable that disconnects from the when disposed of + public static IDisposable ProcessWith(this ISourceBlock>> workerQueue, Func>,Task> action, ILogger logger, int parallelWorkers = 1) + { + var actionBlock = new ActionBlock>>(action, new ExecutionDataflowBlockOptions + { + BoundedCapacity = parallelWorkers, // don't buffer more messages then we are actually able to work on + MaxDegreeOfParallelism = parallelWorkers + }); + actionBlock.Completion.ContinueWith(x => + { + if (x.IsFaulted) + { + logger.LogCritical(x.Exception.Flatten(), "Controller encountered a critical error"); + } + }); + return workerQueue.LinkTo(actionBlock, new DataflowLinkOptions { PropagateCompletion = true}); + + } + + public static IObservable> DetectResets(this IObservable> source, IObserver>> resetSubject) + { + var resetBuffer = new List>(); + + return Observable.Create>(observer => + { + void FlushBuffer() + { + if (resetBuffer.Any()) + { + resetSubject.OnNext(resetBuffer.ToList()); + resetBuffer.Clear(); + } + } + void OnComplete() + { + FlushBuffer(); + observer.OnCompleted(); + resetSubject.OnCompleted(); + } + void OnError(Exception e) + { + observer.OnError(e); + resetSubject.OnError(e); + } + var upstreamSubscription = source + .Do(notification => + { + if (notification.EventFlags.HasFlag(EventTypeFlags.Reset)) + { + resetBuffer.Add(notification); + if (!(notification.EventFlags.HasFlag(EventTypeFlags.ResetEnd))) // continue buffering till we reach the end of list window + return; + } + + if (notification.EventFlags.HasFlag(EventTypeFlags.ResetEnd) || (!notification.EventFlags.HasFlag(EventTypeFlags.Reset) && resetBuffer.Count > 0)) + { + FlushBuffer(); + } + }) + .Subscribe(observer.OnNext, OnError, OnComplete); + return new CompositeDisposable(upstreamSubscription, Disposable.Create(OnComplete)); + }) + .ObserveOn(Scheduler.Immediate); + } + + /// + /// Synchronizes the specified cache with resource event stream such that cache is maintained up to date. 
+ /// + /// The source sequence + /// The cache to synchronize + /// The key selector function + /// The type of key + /// The type of resource + /// Source sequence wrapped into , which allows downstream consumers to synchronize themselves with cache version + public static IObservable>> SynchronizeCache( + this IObservable> source, + ICache cache, + Func keySelector) + { + // long cacheVersion = 0; + var resetSubject = new Subject>>(); + + + List>> UpdateBlock(List> events) + { + // cacheVersion += events.Count; + var reset = events + .Select(x => x.Value) + .Where(x => x != null) + .ToDictionary(keySelector, x => x); + + cache.Reset(reset); + var acc = cache.Version + 1; + cache.Version += events.Count; + return events + .Select(x => new CacheSynchronized>(acc++, cache.Version, x)) + .ToList(); + } + + CacheSynchronized> UpdateSingle(ResourceEvent notification) + { + cache.Version++; + if (!notification.EventFlags.HasFlag(EventTypeFlags.Delete)) + { + if (notification.EventFlags.HasFlag(EventTypeFlags.Modify) && cache.TryGetValue(keySelector(notification.Value), out var oldValue)) + { + notification = new ResourceEvent(notification.EventFlags, notification.Value, oldValue); + } + + if (notification.Value != null) + { + cache[keySelector(notification.Value)] = notification.Value; + } + } + else + { + cache.Remove(keySelector(notification.OldValue)); + } + + return new CacheSynchronized>(cache.Version, cache.Version, notification); + } + + return Observable.Create>>(obs => + { + var a = resetSubject + .SelectMany(UpdateBlock) + .ObserveOn(Scheduler.Immediate) + .Subscribe(obs); + var b = source + .DetectResets(resetSubject) + .Where(x => !x.EventFlags.HasFlag(EventTypeFlags.Reset)) // hold back resets, they'll be reinjected as batches via resetSubject after cache sync + .Select(UpdateSingle) + .ObserveOn(Scheduler.Immediate) + .Subscribe(obs); + return new CompositeDisposable(a, b); + }) + .ObserveOn(Scheduler.Immediate); + + + } + + + public static IObservable> ComputeMissedEventsBetweenResets(this IObservable> source, Func keySelector, IEqualityComparer comparer) + { + Dictionary cacheSnapshot; + var cache = new SimpleCache(); + var cacheSynchronized = false; + return Observable.Create>(observer => + { + return source + .DetectResets(Observer.Create>>(resetBuffer => + { + if (!cacheSynchronized) + { + resetBuffer + .ToObservable() + .Concat(Observable.Never>()) + .ObserveOn(Scheduler.Immediate) + .Subscribe(observer); + return; + } + + cacheSnapshot = new Dictionary(cache); + var newKeys = resetBuffer + .Where(x => x.Value != null) + .Select(x => keySelector(x.Value)) + .ToHashSet(); + + var addedEntities = resetBuffer + .Select(x => x.Value) + .Where(x => x != null && !cacheSnapshot.ContainsKey(keySelector(x))) + .Select(x => x.ToResourceEvent(EventTypeFlags.Add | EventTypeFlags.Computed)) + .ToList(); + var addedKeys = addedEntities + .Select(x => keySelector(x.Value)) + .ToHashSet(); + + var deletedEntities = cacheSnapshot + .Where(x => !newKeys.Contains(x.Key)) + .Select(x => x.Value.ToResourceEvent(EventTypeFlags.Delete | EventTypeFlags.Computed)) + .ToList(); + var deletedKeys = deletedEntities + .Select(x => keySelector(x.Value)) + .ToHashSet(); + + // we can only compute updates if we are given a proper comparer to determine equality between objects + // if not provided, will be sent downstream as just part of reset + var updatedEntities = new List>(); + if (comparer != null) + { + var previouslyKnownEntitiesInResetWindowKeys = cacheSnapshot + .Keys + 
.Intersect(resetBuffer.Select(x => keySelector(x.Value))); + + updatedEntities = resetBuffer + .Where(x => previouslyKnownEntitiesInResetWindowKeys.Contains(keySelector(x.Value))) + .Select(x => x.Value) // stuff in buffer that also existed in cache (by key) + .Except(cacheSnapshot.Select(x => x.Value), comparer) + .Select(x => x.ToResourceEvent(EventTypeFlags.Modify | EventTypeFlags.Computed)) + .ToList(); + } + + var updatedKeys = updatedEntities + .Select(x => keySelector(x.Value)) + .ToHashSet(); + + var resetEntities = resetBuffer + .Select(x => x.Value) + .Where(x => x != null && + !addedKeys.Contains(keySelector(x)) && + !deletedKeys.Contains(keySelector(x)) && + !updatedKeys.Contains(keySelector(x))) + .ToReset() + .ToList(); + + deletedEntities + .Union(addedEntities) + .Union(updatedEntities) + .Union(resetEntities) + .ToList() + .ToObservable() + .Concat(Observable.Never>()) + .ObserveOn(Scheduler.Immediate) + .Subscribe(observer); + })) + .SynchronizeCache(cache, keySelector) + .Do(msg => + { + cacheSynchronized = true; + }) + .Select(x => x.Value) + .Where(x => !x.EventFlags.HasFlag(EventTypeFlags.Reset)) // any resets are split off higher + .ObserveOn(Scheduler.Immediate) + .Subscribe(observer); + }); + } + /// + /// Injects a of type into the observable for each item produced + /// by the operation from + /// + /// The source sequence that will have sync messages appended + /// The timespan interval at which the messages should be produced + /// The type of resource + /// Original sequence with resync applied + public static IObservable> Resync(this IObservable> source, TimeSpan timeSpan) + { + return Observable.Create>( observer => + { + var timerSubscription = Observable + .Interval(timeSpan) + .SelectMany(_ => source + .TakeUntil(x => x.EventFlags.HasFlag(EventTypeFlags.ResetEnd)) + .Do(x => + { + if(!x.EventFlags.HasFlag(EventTypeFlags.Reset)) + throw new InvalidOperationException("Resync was applied to an observable sequence that does not issue a valid List event block when subscribed to"); + }) + .Select(x => x.Value.ToResourceEvent(EventTypeFlags.Sync))) + .Subscribe(observer); + // this ensures that both timer and upstream subscription is closed when subscriber disconnects + var sourceSubscription = source.Subscribe( + observer.OnNext, + observer.OnError, + () => + { + observer.OnCompleted(); + timerSubscription.Dispose(); + }); + return new CompositeDisposable(timerSubscription, sourceSubscription); + }); + } + + /// + /// Wraps an instance of as by using the same + /// set of for every subscription + /// + /// The original instance of + /// The options to use + /// The type of resource + /// + /// + public static IInformer WithOptions(this IInformer optionedInformer, TOptions options) => + new WrappedOptionsInformer(optionedInformer, options); + + private class WrappedOptionsInformer : IInformer + { + private readonly IInformer _informer; + private readonly TOptions _options; + + public WrappedOptionsInformer(IInformer informer, TOptions options) + { + _informer = informer; + _options = options; + } + + public IObservable> GetResource(ResourceStreamType type) => _informer.GetResource(type, _options); + } + } +} diff --git a/src/KubernetesClient/Informers/ResourceStreamType.cs b/src/KubernetesClient/Informers/ResourceStreamType.cs new file mode 100644 index 000000000..834b86e89 --- /dev/null +++ b/src/KubernetesClient/Informers/ResourceStreamType.cs @@ -0,0 +1,24 @@ +using System; + +namespace k8s.Informers +{ + /// + /// The type of resource observable stream that 
specifies whether to return current state of resource, observe changes, or both + /// + [Flags] + public enum ResourceStreamType + { + /// + /// A Cold observable that returns current state of resources and then completes + /// + List = 1, + /// + /// A Hot observable that publishes a list of changes as they happen + /// + Watch = 2, + /// + /// A Hot observable that Lists current state of resources followed by watch. + /// + ListWatch = 3 + } +} diff --git a/src/KubernetesClient/Informers/SharedInformer.cs b/src/KubernetesClient/Informers/SharedInformer.cs new file mode 100644 index 000000000..53ff512a4 --- /dev/null +++ b/src/KubernetesClient/Informers/SharedInformer.cs @@ -0,0 +1,209 @@ +using System; +using System.Collections.Concurrent; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using System.Reactive; +using System.Reactive.Concurrency; +using System.Reactive.Disposables; +using System.Reactive.Linq; +using System.Reactive.Subjects; +using System.Threading; +using System.Threading.Tasks; +using System.Threading.Tasks.Dataflow; +using k8s.Informers.Cache; +using k8s.Informers.Notifications; +using Microsoft.Extensions.Logging; + +namespace k8s.Informers +{ + + /// + /// Wraps a single master informer (such as Kubernetes API connection) for rebroadcast to multiple internal subscribers + /// and is responsible for managing and synchronizing cache + /// + /// + /// + /// Allows rebroadcasting of single informer provided by masterInformer to multiple internal subscribers. + /// Lazy loading semantics apply where subscription to master informer is only established when there's at least one attached observer, and it is closed if all observers disconnect + /// + /// + /// is considered the sole owner of managing the cache. Since cache is used as the source of truth for "List" operations of any downstream subscribers, + /// any attempt to modify cache externally will result in desynchronization. Shared informer will only start emitting events downstream after cache has been synchronized + /// (after first List). + /// + /// + /// + public class SharedInformer : IInformer + { + private readonly ICache _cache; + private readonly ILogger _logger; + private readonly Func _keySelector; + private int _subscribers; + + private IDisposable _masterSubscription; + private TaskCompletionSource _cacheSynchronized = new TaskCompletionSource(); + readonly CountdownEvent _waitingSubscribers = new CountdownEvent(0); + private readonly object _lock = new object(); + private IScheduler _masterScheduler; + private IConnectableObservable>> _masterObservable; + + private IScheduler _masterProcessScheduler = new EventLoopScheduler(); + public SharedInformer(IInformer masterInformer, ILogger logger, Func keySelector) + : this(masterInformer, logger, keySelector,new SimpleCache(), null) + { + + } + public SharedInformer(IInformer masterInformer, ILogger logger, Func keySelector, ICache cache, IScheduler scheduler = null) + { + + _cache = cache; + _masterScheduler = scheduler ?? 
new EventLoopScheduler(); + _logger = logger; + _keySelector = keySelector; + _masterObservable = masterInformer + .GetResource(ResourceStreamType.ListWatch) + + .ObserveOn(_masterScheduler) + .Do(x => _logger.LogTrace($"Received message from upstream {x}")) + .SynchronizeCache(_cache, _keySelector) + .Do(msg => + { + // cache is synchronized as soon as we get at least one message past this point + _logger.LogTrace($"Cache v{cache.Version} synchronized: {msg} "); + _cacheSynchronized.TrySetResult(true); + _logger.LogTrace("_cacheSynchronized.TrySetResult(true)"); + }) + .Do(_ => YieldToWaitingSubscribers()) + .ObserveOn(Scheduler.Immediate) // immediate ensures that all cache operations are done atomically + .ObserveOn(_masterScheduler) + .Catch>, Exception>(e => + { + _cacheSynchronized.TrySetException(e); + // _cacheSynchronized.OnError(e); + return Observable.Throw>>(e); + }) + .Finally(() => _cacheSynchronized.TrySetResult(false)) + // .SubscribeOn(_masterScheduler) + .Publish(); + } + + + [DebuggerStepThrough] + void YieldToWaitingSubscribers() + { + _logger.LogTrace("Waiting for subscribers to attach to stream"); + while (_waitingSubscribers.CurrentCount > 0) + { + // give a chance to any joining subscribers to realign with the broadcast stream + + _waitingSubscribers.Wait(100); + } + _logger.LogTrace("Finished yielding to subscribers"); + + } + + + public IObservable> GetResource(ResourceStreamType type) + { + var childScheduler = new EventLoopScheduler(); // dedicated thread for the child on which all messages are synchronized + return Observable.Defer(async () => + { + AddSubscriber(); + _logger.LogTrace("Subscriber awaiting cache synchronization before attaching"); + + var isCacheSynchronized = await _cacheSynchronized.Task; + if (!isCacheSynchronized) // really this only happens if the master completes before the first reset, in which case the downstream subscriber gets nothing + return Observable.Empty>(); + // we use lock to pause any processing of the broadcaster while we're attaching to the stream so proper alignment can be made + + _logger.LogTrace("Subscriber attaching to broadcaster"); + return Observable.Create>(observer => + { + var broadcasterAttachment = Disposable.Empty; + var cacheSnapshot = _cache.Snapshot(); + if (type.HasFlag(ResourceStreamType.List)) + { + _logger.LogTrace($"Flushing contents of cache version {cacheSnapshot.Version}"); + _cache.Values + .ToReset(type == ResourceStreamType.ListWatch) + .ToObservable() + .Concat(Observable.Never>()) + .ObserveOn(Scheduler.Immediate) + .Subscribe(observer); + } + + if (type.HasFlag(ResourceStreamType.Watch)) + { + broadcasterAttachment = _masterObservable + // we could be ahead of broadcaster because we initialized from cache which gets updated before the messages are sent to the broadcaster + // this logic realigns us at the correct point with the broadcaster + .Do(x => _logger.LogTrace($"Received from broadcaster {x}")) + .SkipWhile(x => x.MessageNumber <= cacheSnapshot.Version) + .Select(x => x.Value) + .Do(x => _logger.LogTrace($"Aligned with broadcaster {x}")) + .SubscribeOn(_masterScheduler) + .ObserveOn(childScheduler) + .Subscribe(observer, () => + { + _logger.LogTrace("Child OnComplete"); + RemoveSubscriber(); + }); + } + else + { + observer.OnCompleted(); + } + + // let broadcaster know we're done attaching to stream so it can resume its regular work + _logger.LogTrace("Finished attaching to stream - signalling to resume"); + lock (_lock) + { + _waitingSubscribers.Signal(); + } + + return 
broadcasterAttachment; + }) + .ObserveOn(childScheduler) + .SubscribeOn(childScheduler); + }) + .SubscribeOn(childScheduler) // ensures that when we attach master observer it's done on child thread, as we plan on awaiting cache synchronization + .Do(_ => _logger.LogTrace($"Shared informer out: {_}")); + + } + + private void AddSubscriber() + { + // when child subscribers attach they need to be synchronized to the master stream + // this is allowed outside of "reset" event boundary. + // the broadcaster will yield to any _waitingSubscribers before resuming work + lock (_lock) + { + // need to do this under lock because we can't just increment if the lock is already set, and there's a + // risk of collision of two threads resetting to 1 at the same time + if (!_waitingSubscribers.TryAddCount()) + { + _waitingSubscribers.Reset(1); + } + } + + if (_subscribers == 0) + { + _masterSubscription = _masterObservable.Connect(); + } + _subscribers++; + } + + private void RemoveSubscriber() + { + _logger.LogTrace("Removing Subscriber!"); + _subscribers--; + if (_subscribers == 0) + { + _cacheSynchronized = new TaskCompletionSource(false); + _masterSubscription.Dispose(); + } + } + } + +} diff --git a/src/KubernetesClient/Informers/SharedKubernetesInformer.cs b/src/KubernetesClient/Informers/SharedKubernetesInformer.cs new file mode 100644 index 000000000..46249007d --- /dev/null +++ b/src/KubernetesClient/Informers/SharedKubernetesInformer.cs @@ -0,0 +1,46 @@ +using System; +using k8s.Informers.Cache; +using k8s.Informers.Notifications; +using k8s.Models; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Logging; + +namespace k8s.Informers +{ + /// + /// Opens a single connection to API server with per unique + /// and attaches 1 or more internal subscriber to it. 
The connection is automatically opened if there is + /// at least one subscriber and closes if there are none + /// + /// The type of resource to monitor + public class SharedKubernetesInformer : + SharedOptionsInformer, + IKubernetesInformer + where TResource : IKubernetesObject, IMetadata + { + public SharedKubernetesInformer(KubernetesInformer masterInformer, ILoggerFactory loggerFactory) + : base(masterInformer, SharedKubernetesInformerFactory(loggerFactory, GetVersionPartitionedCacheFactory())) + { + } + public SharedKubernetesInformer(KubernetesInformer masterInformer, Func> cacheFactory, ILoggerFactory loggerFactory) + : base(masterInformer, SharedKubernetesInformerFactory(loggerFactory, cacheFactory)) + { + } + + private static Func> GetVersionPartitionedCacheFactory() + { + var partitionedSharedCache = new VersionPartitionedSharedCache(x => x.Metadata.Name, x => x.Metadata.ResourceVersion); + return () => partitionedSharedCache.CreatePartition(); + } + + private static Func, IInformer> SharedKubernetesInformerFactory(ILoggerFactory loggerFactory, Func> cacheFactory) => + masterInformer => new SharedInformer( + masterInformer, + loggerFactory.CreateLogger>>(), + x => KubernetesObject.KeySelector(x), + cacheFactory()); + + /// + public IObservable> GetResource(ResourceStreamType type) => base.GetResource(type, KubernetesInformerOptions.Default); + } +} diff --git a/src/KubernetesClient/Informers/SharedOptionsInformer.cs b/src/KubernetesClient/Informers/SharedOptionsInformer.cs new file mode 100644 index 000000000..ebac94426 --- /dev/null +++ b/src/KubernetesClient/Informers/SharedOptionsInformer.cs @@ -0,0 +1,39 @@ +using System; +using System.Collections.Generic; +using k8s.Informers.Notifications; + +namespace k8s.Informers +{ + /// + /// Manages multiple for each unique set of and ensures subscriptions are attached to correct one + /// + /// + /// + public class SharedOptionsInformer : IInformer + { + private readonly IInformer _masterInformer; + private readonly Func, IInformer> _sharedInformerFactory; + private readonly Dictionary> _sharedInformers = new Dictionary>(); + + public SharedOptionsInformer( + IInformer masterInformer, + Func, IInformer> sharedInformerFactory) + { + _masterInformer = masterInformer; + _sharedInformerFactory = sharedInformerFactory; + } + + + + public IObservable> GetResource(ResourceStreamType type, TOptions options) + { + if (!_sharedInformers.TryGetValue(options, out var sharedInformer)) + { + var optionLockedMasterInformer = _masterInformer.WithOptions(options); + sharedInformer = _sharedInformerFactory(optionLockedMasterInformer); + _sharedInformers.Add(options, sharedInformer); + } + return sharedInformer.GetResource(type); + } + } +} diff --git a/src/KubernetesClient/Kubernetes.ConfigInit.cs b/src/KubernetesClient/Kubernetes.ConfigInit.cs index 16cc03380..6fc3f2e9e 100644 --- a/src/KubernetesClient/Kubernetes.ConfigInit.cs +++ b/src/KubernetesClient/Kubernetes.ConfigInit.cs @@ -43,7 +43,7 @@ public Kubernetes(KubernetesClientConfiguration config, HttpClient httpClient, b ValidateConfig(config); CaCerts = config.SslCaCerts; SkipTlsVerify = config.SkipTlsVerify; - SetCredentials(config); + SetCredentials(config); } /// @@ -158,7 +158,7 @@ private void InitializeFromConfig(KubernetesClientConfiguration config) partial void CustomInitialize() { -#if NET452 +#if NET452 ServicePointManager.SecurityProtocol |= SecurityProtocolType.Tls12; #endif AppendDelegatingHandler(); @@ -168,8 +168,12 @@ partial void CustomInitialize() private void 
AppendDelegatingHandler() where T : DelegatingHandler, new() { var cur = FirstMessageHandler as DelegatingHandler; - - while (cur != null) + if (cur == null) + { + FirstMessageHandler = new T(); + return; + } + while(true) { var next = cur.InnerHandler as DelegatingHandler; diff --git a/src/KubernetesClient/Kubernetes.Generic.cs b/src/KubernetesClient/Kubernetes.Generic.cs new file mode 100644 index 000000000..834d355e8 --- /dev/null +++ b/src/KubernetesClient/Kubernetes.Generic.cs @@ -0,0 +1,212 @@ +using System; +using System.Collections.Generic; +using System.Net; +using System.Net.Http; +using System.Threading; +using System.Threading.Tasks; +using k8s.Models; +using Microsoft.Rest; +using Microsoft.Rest.Serialization; +using Newtonsoft.Json; + +namespace k8s +{ + public partial class Kubernetes + { + /// + public async Task>> ListWithHttpMessagesAsync(string namespaceParameter = default(string), + bool? allowWatchBookmarks = default(bool?), + string continueParameter = default(string), + string fieldSelector = default(string), + string labelSelector = default(string), + int? limit = default(int?), + string resourceVersion = default(string), + int? timeoutSeconds = default(int?), + bool? watch = default(bool?), + string pretty = default(string), + Dictionary> customHeaders = null, + CancellationToken cancellationToken = default(CancellationToken)) where T : IKubernetesObject + { + + var entityAttribute = KubernetesObject.GetTypeMetadata(); + if(entityAttribute?.PluralName == null) + throw new InvalidOperationException($"{typeof(T)} doesn't have a plural name set via {typeof(KubernetesEntityAttribute)}"); + + // Tracing + var shouldTrace = ServiceClientTracing.IsEnabled; + string invocationId = null; + if (shouldTrace) + { + invocationId = ServiceClientTracing.NextInvocationId.ToString(); + var tracingParameters = new Dictionary(); + tracingParameters.Add("allowWatchBookmarks", allowWatchBookmarks); + tracingParameters.Add("continueParameter", continueParameter); + tracingParameters.Add("fieldSelector", fieldSelector); + tracingParameters.Add("labelSelector", labelSelector); + tracingParameters.Add("limit", limit); + tracingParameters.Add("resourceVersion", resourceVersion); + tracingParameters.Add("timeoutSeconds", timeoutSeconds); + tracingParameters.Add("watch", watch); + tracingParameters.Add("namespaceParameter", namespaceParameter); + tracingParameters.Add("pretty", pretty); + tracingParameters.Add("cancellationToken", cancellationToken); + ServiceClientTracing.Enter(invocationId, this, "ListNamespacedPod", tracingParameters); + } + // Construct URL + var isLegacy = string.IsNullOrEmpty(entityAttribute.Group); + var segments = new List(); + segments.Add((BaseUri.AbsoluteUri.Trim('/'))); + if (isLegacy) + { + segments.Add("api"); + segments.Add("v1"); + } + else + { + segments.Add("apis"); + segments.Add(entityAttribute.Group); + } + + if (!string.IsNullOrEmpty(namespaceParameter)) + { + segments.Add("namespaces"); + segments.Add(System.Uri.EscapeDataString(namespaceParameter)); + } + segments.Add(entityAttribute.PluralName); + + var url = string.Join("/", segments); + var queryParameters = new List(); + if (allowWatchBookmarks != null) + { + queryParameters.Add(string.Format("allowWatchBookmarks={0}", System.Uri.EscapeDataString(SafeJsonConvert.SerializeObject(allowWatchBookmarks, SerializationSettings).Trim('"')))); + } + if (continueParameter != null) + { + queryParameters.Add(string.Format("continue={0}", System.Uri.EscapeDataString(continueParameter))); + } + if (fieldSelector 
!= null) + { + queryParameters.Add(string.Format("fieldSelector={0}", System.Uri.EscapeDataString(fieldSelector))); + } + if (labelSelector != null) + { + queryParameters.Add(string.Format("labelSelector={0}", System.Uri.EscapeDataString(labelSelector))); + } + if (limit != null) + { + queryParameters.Add(string.Format("limit={0}", System.Uri.EscapeDataString(SafeJsonConvert.SerializeObject(limit, SerializationSettings).Trim('"')))); + } + if (resourceVersion != null) + { + queryParameters.Add(string.Format("resourceVersion={0}", System.Uri.EscapeDataString(resourceVersion))); + } + if (timeoutSeconds != null) + { + queryParameters.Add(string.Format("timeoutSeconds={0}", System.Uri.EscapeDataString(SafeJsonConvert.SerializeObject(timeoutSeconds, SerializationSettings).Trim('"')))); + } + if (watch != null) + { + queryParameters.Add(string.Format("watch={0}", System.Uri.EscapeDataString(SafeJsonConvert.SerializeObject(watch, SerializationSettings).Trim('"')))); + } + if (pretty != null) + { + queryParameters.Add(string.Format("pretty={0}", System.Uri.EscapeDataString(pretty))); + } + if (queryParameters.Count > 0) + { + url += "?" + string.Join("&", queryParameters); + } + // Create HTTP transport objects + var httpRequest = new HttpRequestMessage(); + HttpResponseMessage httpResponse = null; + httpRequest.Method = new HttpMethod("GET"); + httpRequest.RequestUri = new System.Uri(url); + // Set Headers + + + if (customHeaders != null) + { + foreach(var header in customHeaders) + { + if (httpRequest.Headers.Contains(header.Key)) + { + httpRequest.Headers.Remove(header.Key); + } + httpRequest.Headers.TryAddWithoutValidation(header.Key, header.Value); + } + } + + // Serialize Request + string requestContent = null; + // Set Credentials + if (Credentials != null) + { + cancellationToken.ThrowIfCancellationRequested(); + await Credentials.ProcessHttpRequestAsync(httpRequest, cancellationToken).ConfigureAwait(false); + } + // Send Request + if (shouldTrace) + { + ServiceClientTracing.SendRequest(invocationId, httpRequest); + } + cancellationToken.ThrowIfCancellationRequested(); + httpResponse = await HttpClient.SendAsync(httpRequest, cancellationToken).ConfigureAwait(false); + if (shouldTrace) + { + ServiceClientTracing.ReceiveResponse(invocationId, httpResponse); + } + var statusCode = httpResponse.StatusCode; + cancellationToken.ThrowIfCancellationRequested(); + string responseContent = null; + if ((int)statusCode != 200 && (int)statusCode != 401) + { + var ex = new HttpOperationException(string.Format("Operation returned an invalid status code '{0}'", statusCode)); + if (httpResponse.Content != null) { + responseContent = await httpResponse.Content.ReadAsStringAsync().ConfigureAwait(false); + } + else { + responseContent = string.Empty; + } + ex.Request = new HttpRequestMessageWrapper(httpRequest, requestContent); + ex.Response = new HttpResponseMessageWrapper(httpResponse, responseContent); + if (shouldTrace) + { + ServiceClientTracing.Error(invocationId, ex); + } + httpRequest.Dispose(); + if (httpResponse != null) + { + httpResponse.Dispose(); + } + throw ex; + } + // Create Result + var result = new HttpOperationResponse>(); + result.Request = httpRequest; + result.Response = httpResponse; + // Deserialize Response + if ((int)statusCode == 200) + { + responseContent = await httpResponse.Content.ReadAsStringAsync().ConfigureAwait(false); + try + { + result.Body = SafeJsonConvert.DeserializeObject>(responseContent, DeserializationSettings); + } + catch (JsonException ex) + { + 
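// dispose the request/response before surfacing the failure as a SerializationException +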
httpRequest.Dispose(); + if (httpResponse != null) + { + httpResponse.Dispose(); + } + throw new SerializationException("Unable to deserialize the response.", responseContent, ex); + } + } + if (shouldTrace) + { + ServiceClientTracing.Exit(invocationId, result); + } + return result; + } + } +} diff --git a/src/KubernetesClient/KubernetesClient.csproj b/src/KubernetesClient/KubernetesClient.csproj index c973f66b7..f1803cb50 100644 --- a/src/KubernetesClient/KubernetesClient.csproj +++ b/src/KubernetesClient/KubernetesClient.csproj @@ -8,27 +8,28 @@ https://github.com/kubernetes-client/csharp https://raw.githubusercontent.com/kubernetes/kubernetes/master/logo/logo.png kubernetes;docker;containers; - - netstandard2.0;net452;netcoreapp2.1 + netstandard2.0;net461;netcoreapp2.1 netstandard2.0;netcoreapp2.1 k8s true kubernetes-client.snk true 1701;1702;1591;1570;1572;1573;1574 - + true - + true snupkg + 8 + @@ -36,8 +37,12 @@ + + + + diff --git a/src/KubernetesClient/KubernetesClientConfiguration.HttpClientHandler.cs b/src/KubernetesClient/KubernetesClientConfiguration.HttpClientHandler.cs index 66894458f..144e55874 100644 --- a/src/KubernetesClient/KubernetesClientConfiguration.HttpClientHandler.cs +++ b/src/KubernetesClient/KubernetesClientConfiguration.HttpClientHandler.cs @@ -14,7 +14,7 @@ public HttpClientHandler CreateDefaultHttpClientHandler() { { if(this.SkipTlsVerify) { - httpClientHandler.ServerCertificateCustomValidationCallback = + httpClientHandler.ServerCertificateCustomValidationCallback = (sender, certificate, chain, sslPolicyErrors) => true; } else @@ -47,5 +47,7 @@ public void AddCertificates(HttpClientHandler handler) { #endif } } + + public static DelegatingHandler CreateWatchHandler() => new WatcherDelegatingHandler(); } -} \ No newline at end of file +} diff --git a/src/KubernetesClient/KubernetesObject.cs b/src/KubernetesClient/KubernetesObject.cs index d7994d5ab..6eb0785a5 100644 --- a/src/KubernetesClient/KubernetesObject.cs +++ b/src/KubernetesClient/KubernetesObject.cs @@ -1,3 +1,6 @@ +using System; +using System.Reflection; +using k8s.Models; using Newtonsoft.Json; namespace k8s @@ -32,5 +35,11 @@ public class KubernetesObject : IKubernetesObject /// [JsonProperty(PropertyName = "kind")] public string Kind { get; set; } + + public static KubernetesEntityAttribute GetTypeMetadata() where T : IKubernetesObject + { + return typeof(T).GetCustomAttribute(); + } + public static Func, string> KeySelector => o => o.Metadata.Name; } } diff --git a/src/KubernetesClient/Properties/Assembly.cs b/src/KubernetesClient/Properties/Assembly.cs new file mode 100644 index 000000000..f2764d2b3 --- /dev/null +++ b/src/KubernetesClient/Properties/Assembly.cs @@ -0,0 +1,3 @@ +using System.Runtime.CompilerServices; + +[assembly: InternalsVisibleTo("KubernetesClient.Tests, PublicKey=00240000048000009400000006020000002400005253413100040000010001004917ad4e106c573cc5dbb3b7456de8b6c07128ae43de292752b339eb423de60f0db6a6c0cb21e6640fc672cc84df4a772db85df1505e5dd08c98d5d115eed7a7b59c67fe1f4b32fa716b7177743a417b3fcf88606861650a81f565ac6614abbf8b6b7710436edb497a83974165f9fe6995b70af13047a110bf63cdbfa45f89ac")] diff --git a/src/KubernetesClient/Watcher.cs b/src/KubernetesClient/Watcher.cs index 045c4b95b..bff650fa8 100644 --- a/src/KubernetesClient/Watcher.cs +++ b/src/KubernetesClient/Watcher.cs @@ -1,10 +1,14 @@ using System; +using System.Diagnostics; using System.IO; +using System.Reactive.Disposables; +using System.Reactive.Linq; using System.Runtime.Serialization; using System.Threading; using 
System.Threading.Tasks; using k8s.Exceptions; using k8s.Models; +using Microsoft.Extensions.Logging; using Microsoft.Rest; using Microsoft.Rest.Serialization; @@ -18,7 +22,9 @@ public enum WatchEventType [EnumMember(Value = "DELETED")] Deleted, - [EnumMember(Value = "ERROR")] Error + [EnumMember(Value = "ERROR")] Error, + + [EnumMember(Value = "BOOKMARK")] Bookmark } public class Watcher : IDisposable @@ -96,7 +102,6 @@ private async Task WatcherLoop(CancellationToken cancellationToken) Watching = true; string line; _streamReader = await _streamReaderCreator().ConfigureAwait(false); - // ReadLineAsync will return null when we've reached the end of the stream. while ((line = await _streamReader.ReadLineAsync().ConfigureAwait(false)) != null) { @@ -171,6 +176,22 @@ public static Watcher Watch(this Task> respons return content.StreamReader; } , onEvent, onError, onClosed); } + public static IObservable.WatchEvent> Watch(this Task>> responseTask) where T : IKubernetesObject + { + return Observable.Create.WatchEvent>(observer => + { + void OnNext(WatchEventType type, T item) => observer.OnNext(new k8s.Watcher.WatchEvent {Type = type, Object = item}); + var watcher = responseTask.Watch>(OnNext, observer.OnError, observer.OnCompleted); + var eventSubscription = Disposable.Create(() => + { + watcher.OnEvent -= OnNext; + watcher.OnError -= observer.OnError; + watcher.OnClosed -= observer.OnCompleted; + }); + return new CompositeDisposable(watcher, eventSubscription); + }); + + } /// /// create a watch object from a call to api server with watch=true diff --git a/tests/KubernetesClient.Tests/DeltaFifoTests.cs b/tests/KubernetesClient.Tests/DeltaFifoTests.cs new file mode 100644 index 000000000..9901a63f3 --- /dev/null +++ b/tests/KubernetesClient.Tests/DeltaFifoTests.cs @@ -0,0 +1,135 @@ +using System.Collections.Generic; +using System.Threading.Tasks; +using System.Threading.Tasks.Dataflow; +using FluentAssertions; +using k8s.Controllers; +using k8s.Informers.Notifications; +using k8s.Tests.Utils; +using Microsoft.EntityFrameworkCore; +using Microsoft.Extensions.Logging; +using Microsoft.Reactive.Testing; +using Xunit; +using Xunit.Abstractions; +using static k8s.Informers.Notifications.EventTypeFlags; + +namespace k8s.Tests +{ + public class DeltaFifoTests + { + private readonly ILogger _log; + + public DeltaFifoTests(ITestOutputHelper output) + { + _log = new XunitLogger(output); + } + + + + public static IEnumerable DistributeWorkTestData() + { + yield return new object[] + { + nameof(TestData.Events.ResetWith2_Delay_UpdateToEach), // description + TestData.Events.ResetWith2_Delay_UpdateToEach, // events + new [] // expected + { + new[] + { + new TestResource(1).ToResourceEvent(ResetStart), + }, + new[] + { + new TestResource(2).ToResourceEvent(ResetEnd), + }, + new[] + { + new TestResource(1).ToResourceEvent(Modify), + new TestResource(1).ToResourceEvent(Modify), + } + } + }; + yield return new object[] + { + nameof(TestData.Events.EmptyReset_Delay_Add), // description + TestData.Events.EmptyReset_Delay_Add, // events + new [] // expected + { + new[] + { + new TestResource(1).ToResourceEvent(Add), + } + } + }; + yield return new object[] + { + nameof(TestData.Events.Sync_Delay_SyncAndUpdate), // description + TestData.Events.Sync_Delay_SyncAndUpdate, // events + new [] // expected + { + new[] + { + new TestResource(1).ToResourceEvent(Sync), + }, + new[] + { + new TestResource(1).ToResourceEvent(Modify), // modify "kicks" out the other sync + } + } + }; + + yield return new object[] + { + 
nameof(TestData.Events.ResetWith2_Delay_UpdateBoth_Delay_Add1), // description + TestData.Events.ResetWith2_Delay_UpdateBoth_Delay_Add1, // events + new [] // expected + { + new[] + { + new TestResource(1).ToResourceEvent(ResetStart), + }, + new[] + { + new TestResource(2).ToResourceEvent(ResetEnd), + }, + new[] + { + new TestResource(1).ToResourceEvent(Modify), + new TestResource(1).ToResourceEvent(Modify), + }, + new[] + { + new TestResource(3).ToResourceEvent(Add), + } + } + }; + } + + [Theory] + [MemberData(nameof(DistributeWorkTestData))] + + public async Task DistributeWork(string description, ScheduledEvent[] events, ResourceEvent[][] expected) + { + _log.LogInformation(description); + var sut = new ResourceEventDeltaBlock(x => x.Key); + var testScheduler = new TestScheduler(); + var results = new List>>(); + var actionBlock = new ActionBlock>>(deltas => + { + _log.LogTrace($"Worker called for {string.Join("",deltas)}"); + results.Add(deltas); + testScheduler.Sleep(300); + }, new ExecutionDataflowBlockOptions() + { + BoundedCapacity = 1, + MaxDegreeOfParallelism = 1 + }); + sut.LinkTo(actionBlock, new DataflowLinkOptions {PropagateCompletion = true}); + + events.ToTestObservable(testScheduler, logger: _log).Subscribe(sut.AsObserver()); + await actionBlock.Completion.TimeoutIfNotDebugging(); + + results.Should().BeEquivalentTo(expected); + } + } + +} diff --git a/tests/KubernetesClient.Tests/Kubernetes.WebSockets.Tests.cs b/tests/KubernetesClient.Tests/Kubernetes.WebSockets.Tests.cs index 62a339f79..f87a3a285 100644 --- a/tests/KubernetesClient.Tests/Kubernetes.WebSockets.Tests.cs +++ b/tests/KubernetesClient.Tests/Kubernetes.WebSockets.Tests.cs @@ -10,7 +10,7 @@ using System.Threading.Tasks; using Xunit; -namespace k8s.tests +namespace k8s.Tests { public class KubernetesExecTests { diff --git a/tests/KubernetesClient.Tests/KubernetesClient.Tests.csproj b/tests/KubernetesClient.Tests/KubernetesClient.Tests.csproj index ac41d177e..abbb34694 100755 --- a/tests/KubernetesClient.Tests/KubernetesClient.Tests.csproj +++ b/tests/KubernetesClient.Tests/KubernetesClient.Tests.csproj @@ -1,11 +1,15 @@ false - k8s.tests + k8s.Tests netcoreapp2.1;netcoreapp2.0 + 8 + true + ..\..\src\KubernetesClient\kubernetes-client.snk + true @@ -13,11 +17,19 @@ + + + + + + + + @@ -31,6 +43,8 @@ + + diff --git a/tests/KubernetesClient.Tests/KubernetesResourceInformerTests.cs b/tests/KubernetesClient.Tests/KubernetesResourceInformerTests.cs new file mode 100644 index 000000000..8b7a7006d --- /dev/null +++ b/tests/KubernetesClient.Tests/KubernetesResourceInformerTests.cs @@ -0,0 +1,217 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Net; +using System.Net.Http; +using System.Reactive.Linq; +using System.Reactive.Threading.Tasks; +using System.Threading; +using System.Threading.Tasks; +using FluentAssertions; +using k8s.Informers; +using k8s.Informers.FaultTolerance; +using k8s.Informers.Notifications; +using k8s.Models; +using k8s.Tests.Mock; +using k8s.Tests.Utils; +using Microsoft.Extensions.Logging; +using Microsoft.Rest; +using Newtonsoft.Json; +using Newtonsoft.Json.Converters; +using NSubstitute; +using NSubstitute.ExceptionExtensions; +using WireMock.Matchers; +using WireMock.RequestBuilders; +using WireMock.ResponseBuilders; +using WireMock.Server; +using Xunit; +using Xunit.Abstractions; + +namespace k8s.Tests +{ + public class KubernetesResourceInformerTests : IDisposable + { + private readonly ITestOutputHelper _testOutput; + private WireMockServer _server; + 
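// client under test, configured against the WireMock server above +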
private Kubernetes _kubernetes; + private ILogger _log; + + public KubernetesResourceInformerTests(ITestOutputHelper testOutput) + { + _testOutput = testOutput; + _log = new XunitLogger(testOutput); + JsonConvert.DefaultSettings = () => new JsonSerializerSettings() {Converters = new[] {new StringEnumConverter()}, Formatting = Formatting.None}; + _server = WireMockServer.Start(); + _kubernetes = new Kubernetes(new KubernetesClientConfiguration {Host = _server.Urls.First()}); + } + + [Fact] + public async Task List() + { + _server.Given(Request.Create().WithParam("watch",MatchBehaviour.RejectOnMatch,"true").UsingGet()) + .RespondWith(Response.Create().WithBodyAsJson(TestData.ListPodsTwoItems)); + var sut = new KubernetesInformer(_kubernetes); + + var result = await sut.GetResource(ResourceStreamType.List).ToList().TimeoutIfNotDebugging();; + + result.Should().HaveCount(2); + result[0].EventFlags.Should().HaveFlag(EventTypeFlags.ResetStart); + result[1].EventFlags.Should().HaveFlag(EventTypeFlags.ResetEnd); + result[0].Value.Should().BeEquivalentTo(TestData.ListPodsTwoItems.Items[0]); + result[1].Value.Should().BeEquivalentTo(TestData.ListPodsTwoItems.Items[1]); + } + [Fact] + public async Task Watch() + { + _server.Given(Request.Create().WithParam("watch", MatchBehaviour.AcceptOnMatch, "true").UsingGet()) + .RespondWith(Response.Create().WithBodyAsJson(TestData.TestPod1ResourceVersion2.ToWatchEvent(WatchEventType.Added))); + var sut = new KubernetesInformer(_kubernetes, null, () => false); + + var result = await sut.GetResource(ResourceStreamType.Watch).ToList().TimeoutIfNotDebugging(); + + result.Should().HaveCount(1); + result[0].EventFlags.Should().HaveFlag(EventTypeFlags.Add); + result[0].Value.Should().BeEquivalentTo(TestData.TestPod1ResourceVersion2); + } + + [Fact] + public async Task ListWatch() + { + + _server.Given(Request.Create().UsingGet()).AtPriority(100) + .RespondWith(Response.Create().WithBodyAsJson(TestData.ListPodsTwoItems)); + _server.Given(Request.Create().WithParam("watch", MatchBehaviour.AcceptOnMatch, "true").UsingGet()) + .RespondWith(Response.Create().WithBodyAsJson(TestData.TestPod1ResourceVersion2.ToWatchEvent(WatchEventType.Modified))); + + var sut = new KubernetesInformer(_kubernetes, new RetryPolicy((e,i) => false, i => TimeSpan.Zero), () => false); + + var result = await sut.GetResource(ResourceStreamType.ListWatch).ToList().TimeoutIfNotDebugging(); + result.Should().HaveCount(3); + result[0].EventFlags.Should().HaveFlag(EventTypeFlags.ResetStart); + result[0].Value.Should().BeEquivalentTo(TestData.TestPod1ResourceVersion1); + result[1].EventFlags.Should().HaveFlag(EventTypeFlags.ResetEnd); + result[1].Value.Should().BeEquivalentTo(TestData.TestPod2ResourceVersion1); + result[2].EventFlags.Should().HaveFlag(EventTypeFlags.Modify); + result[2].Value.Should().BeEquivalentTo(TestData.TestPod1ResourceVersion2); + } + + [Fact] + public async Task WatchWithRetryPolicy_WhenApiCallThrowsTransient_ShouldRetry() + { + var kubernetes = Substitute.For(); + kubernetes.ListWithHttpMessagesAsync().ThrowsForAnyArgs(info => new HttpRequestException()); + var sut = new KubernetesInformer(kubernetes, new RetryPolicy((e,i) => i < 2, i => TimeSpan.Zero), () => false); + Func act = async () => await sut.GetResource(ResourceStreamType.ListWatch).ToList().TimeoutIfNotDebugging(); + act.Should().Throw(); + await kubernetes.ReceivedWithAnyArgs(2).ListWithHttpMessagesAsync(); + await kubernetes.Received().ListWithHttpMessagesAsync(cancellationToken: Arg.Any()); + } + [Fact] + public 
void Watch_InterruptedWatchAndGoneResourceVersion_ShouldReList() + { + var kubernetes = Substitute.For(); + + kubernetes.ListWithHttpMessagesAsync(cancellationToken: Arg.Any()) + .Returns( + _ => TestData.ListPodEmpty.ToHttpOperationResponse(), + _ => throw new TestCompleteException()); + kubernetes.ListWithHttpMessagesAsync( + cancellationToken: Arg.Any(), + watch: true, + allowWatchBookmarks: true, + resourceVersion: TestData.ListPodEmpty.Metadata.ResourceVersion, + timeoutSeconds: int.MaxValue) + .Returns( + _ => TestData.TestPod1ResourceVersion1.ToWatchEvent(WatchEventType.Added).ToHttpOperationResponse(), + _ => new HttpOperationResponse>() {Response = new HttpResponseMessage() {StatusCode = HttpStatusCode.Gone}}); + + var sut = new KubernetesInformer(kubernetes, RetryPolicy.None, () => true); + Func act = async () =>await sut.GetResource(ResourceStreamType.ListWatch).ToList().TimeoutIfNotDebugging(); + act.Should().Throw(); + + Received.InOrder(() => + { + // initial list + kubernetes.ListWithHttpMessagesAsync(cancellationToken: Arg.Any()); + // watch after list + kubernetes.ListWithHttpMessagesAsync( + cancellationToken: Arg.Any(), + watch: true, + allowWatchBookmarks: true, + resourceVersion: TestData.ListPodEmpty.Metadata.ResourceVersion, + timeoutSeconds: int.MaxValue); + // resume watch with same resource version - server responded with gone + kubernetes.ListWithHttpMessagesAsync( + cancellationToken: Arg.Any(), + watch: true, + allowWatchBookmarks: true, + resourceVersion: TestData.ListPodEmpty.Metadata.ResourceVersion, + timeoutSeconds: int.MaxValue); + // restart the whole thing with list without version + kubernetes.ListWithHttpMessagesAsync(cancellationToken: Arg.Any()); + }); + } + [Fact] + public void Watch_BookmarkInterrupted_ShouldRewatchWithBookmarkResourceVersion() + { + var kubernetes = Substitute.For(); + + kubernetes.ListWithHttpMessagesAsync(cancellationToken: Arg.Any()) + .Returns(_ => TestData.ListPodEmpty.ToHttpOperationResponse()); + kubernetes.ListWithHttpMessagesAsync( + watch: true, + resourceVersion: TestData.ListPodEmpty.Metadata.ResourceVersion, + allowWatchBookmarks: true, + cancellationToken: Arg.Any(), + timeoutSeconds: int.MaxValue) + .Returns( + _ => new V1Pod() + { + Kind = V1Pod.KubeKind, + ApiVersion = V1Pod.KubeApiVersion, + Metadata = new V1ObjectMeta() + { + ResourceVersion = TestData.ListPodOneItem.Metadata.ResourceVersion + } + } + .ToWatchEvent(WatchEventType.Bookmark) + .ToHttpOperationResponse()); + kubernetes.ListWithHttpMessagesAsync( + watch: true, + allowWatchBookmarks: true, + resourceVersion: TestData.ListPodOneItem.Metadata.ResourceVersion, + cancellationToken: Arg.Any(), + timeoutSeconds: int.MaxValue) + .Throws(); + + var sut = new KubernetesInformer(kubernetes, RetryPolicy.None, () => true); + Func act = async () =>await sut.GetResource(ResourceStreamType.ListWatch).ToList().TimeoutIfNotDebugging(); + + act.Should().Throw(); + Received.InOrder(() => + { + // initial list + kubernetes.ListWithHttpMessagesAsync(cancellationToken: Arg.Any()); + // watch after list with same version as returned by list - receive bookmark with new version + kubernetes.ListWithHttpMessagesAsync( + resourceVersion: TestData.ListPodEmpty.Metadata.ResourceVersion, + cancellationToken: Arg.Any(), + watch: true, + allowWatchBookmarks: true, + timeoutSeconds: int.MaxValue); + // resume watch with bookmark version + kubernetes.ListWithHttpMessagesAsync( + resourceVersion: TestData.ListPodOneItem.Metadata.ResourceVersion, + cancellationToken: Arg.Any(), + 
watch: true, + allowWatchBookmarks: true, + timeoutSeconds: int.MaxValue); + }); + } + + public void Dispose() + { + _server?.Dispose(); + } + } +} diff --git a/tests/KubernetesClient.Tests/Mock/MockKubeApiServer.cs b/tests/KubernetesClient.Tests/Mock/MockKubeApiServer.cs index 4869c4c86..0120aae56 100644 --- a/tests/KubernetesClient.Tests/Mock/MockKubeApiServer.cs +++ b/tests/KubernetesClient.Tests/Mock/MockKubeApiServer.cs @@ -1,6 +1,8 @@ using System; +using System.Collections.Generic; using System.Linq; using System.Net; +using System.Net.Http; using System.Threading.Tasks; using k8s.Tests.Logging; using Microsoft.AspNetCore; @@ -10,12 +12,54 @@ using Microsoft.AspNetCore.Http; using Microsoft.AspNetCore.Server.Kestrel.Core; using Microsoft.Extensions.Logging; +using Nito.AsyncEx; using Xunit.Abstractions; namespace k8s.Tests.Mock { public sealed class MockKubeApiServer : IDisposable { + public class ResponsePlayer + { + Action _httpContextConfig; + AsyncManualResetEvent _nextSignal = new AsyncManualResetEvent(false); + IEnumerable _responses; + + public ResponsePlayer(params string[] responses) : this(null, responses) + { + + } + public ResponsePlayer(Action httpContextConfig = null, params string[] responses) + { + _httpContextConfig = httpContextConfig ?? (_ => {}); + _responses = responses; + } + + + public Func> ResponseConfigurer() + { + return c => + { + _httpContextConfig(c); + async IAsyncEnumerable GetAsyncEnumerable() + { + foreach(var item in _responses) + { + await _nextSignal.WaitAsync(); + yield return item; + _nextSignal.Reset(); + } + } + return GetAsyncEnumerable(); + }; + } + public void SendNextResponse() + { + _nextSignal.Set(); + } + } + + // paste from minikube /api/v1/namespaces/default/pods public const string MockPodResponse = "{\r\n \"kind\": \"PodList\",\r\n \"apiVersion\": \"v1\",\r\n \"metadata\": {\r\n \"selfLink\": \"/api/v1/namespaces/default/pods\",\r\n \"resourceVersion\": \"1762810\"\r\n },\r\n \"items\": [\r\n {\r\n \"metadata\": {\r\n \"name\": \"nginx-1493591563-xb2v4\",\r\n \"generateName\": \"nginx-1493591563-\",\r\n \"namespace\": \"default\",\r\n \"selfLink\": \"/api/v1/namespaces/default/pods/nginx-1493591563-xb2v4\",\r\n \"uid\": \"ac1abb94-9c58-11e7-aaf5-00155d744505\",\r\n \"resourceVersion\": \"1737928\",\r\n \"creationTimestamp\": \"2017-09-18T10:03:51Z\",\r\n \"labels\": {\r\n \"app\": \"nginx\",\r\n \"pod-template-hash\": \"1493591563\"\r\n },\r\n \"annotations\": {\r\n \"kubernetes.io/created-by\": \"{\\\"kind\\\":\\\"SerializedReference\\\",\\\"apiVersion\\\":\\\"v1\\\",\\\"reference\\\":{\\\"kind\\\":\\\"ReplicaSet\\\",\\\"namespace\\\":\\\"default\\\",\\\"name\\\":\\\"nginx-1493591563\\\",\\\"uid\\\":\\\"ac013b63-9c58-11e7-aaf5-00155d744505\\\",\\\"apiVersion\\\":\\\"extensions\\\",\\\"resourceVersion\\\":\\\"5306\\\"}}\\n\"\r\n },\r\n \"ownerReferences\": [\r\n {\r\n \"apiVersion\": \"extensions/v1beta1\",\r\n \"kind\": \"ReplicaSet\",\r\n \"name\": \"nginx-1493591563\",\r\n \"uid\": \"ac013b63-9c58-11e7-aaf5-00155d744505\",\r\n \"controller\": true,\r\n \"blockOwnerDeletion\": true\r\n }\r\n ]\r\n },\r\n \"spec\": {\r\n \"volumes\": [\r\n {\r\n \"name\": \"default-token-3zzcj\",\r\n \"secret\": {\r\n \"secretName\": \"default-token-3zzcj\",\r\n \"defaultMode\": 420\r\n }\r\n }\r\n ],\r\n \"containers\": [\r\n {\r\n \"name\": \"nginx\",\r\n \"image\": \"nginx\",\r\n \"resources\": {},\r\n \"volumeMounts\": [\r\n {\r\n \"name\": \"default-token-3zzcj\",\r\n \"readOnly\": true,\r\n \"mountPath\": 
\"/var/run/secrets/kubernetes.io/serviceaccount\"\r\n }\r\n ],\r\n \"terminationMessagePath\": \"/dev/termination-log\",\r\n \"terminationMessagePolicy\": \"File\",\r\n \"imagePullPolicy\": \"Always\"\r\n }\r\n ],\r\n \"restartPolicy\": \"Always\",\r\n \"terminationGracePeriodSeconds\": 30,\r\n \"dnsPolicy\": \"ClusterFirst\",\r\n \"serviceAccountName\": \"default\",\r\n \"serviceAccount\": \"default\",\r\n \"nodeName\": \"ubuntu\",\r\n \"securityContext\": {},\r\n \"schedulerName\": \"default-scheduler\"\r\n },\r\n \"status\": {\r\n \"phase\": \"Running\",\r\n \"conditions\": [\r\n {\r\n \"type\": \"Initialized\",\r\n \"status\": \"True\",\r\n \"lastProbeTime\": null,\r\n \"lastTransitionTime\": \"2017-09-18T10:03:51Z\"\r\n },\r\n {\r\n \"type\": \"Ready\",\r\n \"status\": \"True\",\r\n \"lastProbeTime\": null,\r\n \"lastTransitionTime\": \"2017-10-12T07:09:21Z\"\r\n },\r\n {\r\n \"type\": \"PodScheduled\",\r\n \"status\": \"True\",\r\n \"lastProbeTime\": null,\r\n \"lastTransitionTime\": \"2017-09-18T10:03:51Z\"\r\n }\r\n ],\r\n \"hostIP\": \"192.168.188.42\",\r\n \"podIP\": \"172.17.0.5\",\r\n \"startTime\": \"2017-09-18T10:03:51Z\",\r\n \"containerStatuses\": [\r\n {\r\n \"name\": \"nginx\",\r\n \"state\": {\r\n \"running\": {\r\n \"startedAt\": \"2017-10-12T07:09:20Z\"\r\n }\r\n },\r\n \"lastState\": {\r\n \"terminated\": {\r\n \"exitCode\": 0,\r\n \"reason\": \"Completed\",\r\n \"startedAt\": \"2017-10-10T21:35:51Z\",\r\n \"finishedAt\": \"2017-10-12T07:07:37Z\",\r\n \"containerID\": \"docker://94df3f3965807421ad6dc76618e00b76cb15d024919c4946f3eb46a92659c62a\"\r\n }\r\n },\r\n \"ready\": true,\r\n \"restartCount\": 7,\r\n \"image\": \"nginx:latest\",\r\n \"imageID\": \"docker-pullable://nginx@sha256:004ac1d5e791e705f12a17c80d7bb1e8f7f01aa7dca7deee6e65a03465392072\",\r\n \"containerID\": \"docker://fa11bdd48c9b7d3a6c4c3f9b6d7319743c3455ab8d00c57d59c083b319b88194\"\r\n }\r\n ],\r\n \"qosClass\": \"BestEffort\"\r\n }\r\n }\r\n ]\r\n}" @@ -24,18 +68,43 @@ public sealed class MockKubeApiServer : IDisposable private readonly IWebHost _webHost; public MockKubeApiServer(ITestOutputHelper testOutput, Func> shouldNext = null, Action listenConfigure = null, - string resp = MockPodResponse) + string resp = MockPodResponse) : this(testOutput, ToAsyncEnumerable(shouldNext, resp), listenConfigure) { - shouldNext = shouldNext ?? (_ => Task.FromResult(true)); + } + + private static Func> ToAsyncEnumerable(Func> shouldNextFunc, string resp) => + c => + { + var shouldNext = shouldNextFunc != null ? shouldNextFunc(c) : Task.FromResult(true); + async IAsyncEnumerable ToAsyncEnumerable() + { + if (await shouldNext) + { + yield return resp; + } + } + return ToAsyncEnumerable(); + }; + + public MockKubeApiServer(ITestOutputHelper testOutput, ResponsePlayer response, Action listenConfigure = null) : this(testOutput, response.ResponseConfigurer(), listenConfigure) + { + } + + public MockKubeApiServer(ITestOutputHelper testOutput, Func> response, Action listenConfigure = null) + { + _client = new Lazy(() => new HttpClient() {BaseAddress = Uri}); listenConfigure = listenConfigure ?? 
(_ => { }); _webHost = WebHost.CreateDefaultBuilder() .Configure(app => app.Run(async httpContext => { - if (await shouldNext(httpContext)) + var content = response(httpContext); + await foreach (var chunk in content) { - await httpContext.Response.WriteAsync(resp); + await httpContext.Response.WriteAsync(chunk); + await httpContext.Response.Body.FlushAsync(); } + })) .UseKestrel(options => { options.Listen(IPAddress.Loopback, 0, listenConfigure); }) .ConfigureLogging(logging => @@ -53,6 +122,9 @@ public MockKubeApiServer(ITestOutputHelper testOutput, Func _webHost.ServerFeatures.Get().Addresses .Select(a => new Uri(a)).First(); + private readonly Lazy _client; + public HttpClient Client => _client.Value; + public void Dispose() { _webHost.StopAsync(); diff --git a/tests/KubernetesClient.Tests/Mock/MockWebSocket.cs b/tests/KubernetesClient.Tests/Mock/MockWebSocket.cs index e0bdad547..835b8b0e4 100644 --- a/tests/KubernetesClient.Tests/Mock/MockWebSocket.cs +++ b/tests/KubernetesClient.Tests/Mock/MockWebSocket.cs @@ -1,11 +1,11 @@ -using Nito.AsyncEx; using System; using System.Collections.Concurrent; using System.Net.WebSockets; using System.Threading; using System.Threading.Tasks; +using Nito.AsyncEx; -namespace k8s.tests.Mock +namespace k8s.Tests.Mock { public class MockWebSocket : WebSocket { @@ -20,7 +20,7 @@ public MockWebSocket(string subProtocol = null) { this.subProtocol = subProtocol; } - + public void SetState(WebSocketState state) { this.state = state; diff --git a/tests/KubernetesClient.Tests/SharedInformerTests.cs b/tests/KubernetesClient.Tests/SharedInformerTests.cs new file mode 100644 index 000000000..801487d0d --- /dev/null +++ b/tests/KubernetesClient.Tests/SharedInformerTests.cs @@ -0,0 +1,306 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; +using System.Reactive.Concurrency; +using System.Reactive.Disposables; +using System.Reactive.Linq; +using System.Reflection; +using System.Text.RegularExpressions; +using System.Threading.Tasks; +using FluentAssertions; +using k8s.Informers; +using k8s.Informers.Cache; +using k8s.Informers.Notifications; +using k8s.Models; +using k8s.Tests.Utils; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Logging.Abstractions; +using Microsoft.Reactive.Testing; +using NSubstitute; +using Xunit; +using Xunit.Abstractions; +using static k8s.Informers.Notifications.EventTypeFlags; + +namespace k8s.Tests +{ + public class SharedInformerTests + { + private readonly ILogger _log; + + public SharedInformerTests(ITestOutputHelper output) + { + _log = new XunitLogger(output); + } + + public static IEnumerable GetComputedTestScenarios() + { + var scenariosNamesUnaffectedByComputeOptions = TestData.Events.AllScenarios + .Where(x => !x.Item1.Contains("Sync")) + .Where(x => Regex.Matches(x.Item1, "Reset").Count < 2); + // with computed option that shouldn't act any differently then non computed + foreach (var (description, events) in scenariosNamesUnaffectedByComputeOptions) + yield return new object[] + { + $"Computed_{description}", // description + events, + events.ToBasicExpected() // expecting same as master informer + }; + // yield break; + yield return new object[] + { + $"Computed__{nameof(TestData.Events.ResetWith1_Delay_ResetWith2_ImplicitAddition)}", // description + TestData.Events.EmptyReset_Delay_Add, // events + new [] // expected + { + ResourceEvent.ResetEmpty, + new TestResource(1).ToResourceEvent(Add) + } + }; + + yield return new object[] + { + 
$"Computed_{nameof(TestData.Events.ResetWith1_Delay_ResetWith2_ImplicitAddition)}", // description + TestData.Events.ResetWith1_Delay_ResetWith2_ImplicitAddition, // events + new [] // expected + { + new TestResource(1).ToResourceEvent(ResetStart | ResetEnd), + new TestResource(1).ToResourceEvent(ResetStart | ResetEnd), + new TestResource(2).ToResourceEvent(Add | Computed), + } + }; + + yield return new object[] + { + $"Computed_{nameof(TestData.Events.ResetWith2_Delay_ResetWith1One_ImplicitDeletion)}", // description + TestData.Events.ResetWith2_Delay_ResetWith1One_ImplicitDeletion, // events + new [] // expected + { + new TestResource(1).ToResourceEvent(ResetStart), + new TestResource(2).ToResourceEvent(ResetEnd), + new TestResource(2).ToResourceEvent(Delete | Computed), + new TestResource(1).ToResourceEvent(ResetStart | ResetEnd) + } + }; + + yield return new object[] + { + $"Computed_ImplicitUpdateAfterReset /w Comparer", // description + TestData.Events.ResetWith2_Delay_ResetWith2OneDifferentVersion_ImplicitUpdate, // events + new [] // expected + { + new TestResource(1,1).ToResourceEvent(ResetStart), + new TestResource(2,1).ToResourceEvent(ResetEnd), + new TestResource(2, 2).ToResourceEvent( Modify | Computed), + new TestResource(1, 1).ToResourceEvent(ResetStart | ResetEnd) + }, + TestResource.KeyVersionComparer + }; + yield return new object[] + { + $"Computed_ImplicitUpdateAfterReset /wo Comparer", // description + TestData.Events.ResetWith2_Delay_ResetWith2OneDifferentVersion_ImplicitUpdate, // events + new [] // expected + { + new TestResource(1,1).ToResourceEvent(ResetStart), + new TestResource(2,1).ToResourceEvent(ResetEnd), + new TestResource(1,1).ToResourceEvent(ResetStart), + new TestResource(2,2).ToResourceEvent(ResetEnd), + } + }; + } + + public static IEnumerable GetTestScenarios() + { + var masterInformerScenarios = TestData.Events.AllScenarios + .Where(x => !x.Item1.Contains("Sync")) + .ToList(); + foreach (var (description, events) in masterInformerScenarios) + yield return new object[] + { + $"{description}", // description + events, + events.ToBasicExpected() // expecting same as master informer + }; + + } + + [Theory] + [MemberData(nameof(GetTestScenarios))] + public async Task FirstSubscriber(string description, ScheduledEvent[] scenario, ResourceEvent[] expected) + { + for (int i = 0; i < 1000; i++) + { + _log.LogInformation("==============================================================================="); + _log.LogInformation(description); + var cache = new SimpleCache(); + var masterInformer = Substitute.For>(); + masterInformer.GetResource(ResourceStreamType.ListWatch).Returns(scenario.ToTestObservable()); + var sharedInformer = new SharedInformer(masterInformer, _log, x => x.Key, cache); + + var observable = sharedInformer + .GetResource(ResourceStreamType.ListWatch) + .TimeoutIfNotDebugging() + .ToList(); + var results = await observable; + + results.Should().NotBeEmpty(); + results.Should().BeEquivalentTo(expected); + } + } + + [Fact] + public void IncompleteResetOnMasterWithException_ReceivedExceptionWithNoData() + { + + var cache = new SimpleCache(); + var masterInformer = Substitute.For>(); + masterInformer.GetResource(ResourceStreamType.ListWatch).Returns( + Observable.Create>(obs => + { + obs.OnNext(new TestResource(1).ToResourceEvent(EventTypeFlags.ResetStart)); + obs.OnError(new TestCompleteException()); + return Disposable.Empty; + })); + var sharedInformer = new SharedInformer(masterInformer, _log, x => x.Key, cache); + + var observable = 
sharedInformer + .GetResource(ResourceStreamType.ListWatch) + .TimeoutIfNotDebugging() + .ToList(); + + var dataReceived = false; + var testComplete = new TaskCompletionSource(); + observable.Subscribe( + x => dataReceived = true, + e => testComplete.TrySetException(e), + () => testComplete.SetResult(true)); + + Func> act = async () => await testComplete.Task.TimeoutIfNotDebugging(); + act.Should().Throw(); + dataReceived.Should().BeFalse(); + } + + [Fact] + public async Task WhenSecondSubscriber_ReuseMasterConnection() + { + + var cache = new SimpleCache(); + var masterInformer = Substitute.For>(); + var scheduler = new TestScheduler(); + masterInformer.GetResource(ResourceStreamType.ListWatch) + .Returns(TestData.Events.ResetWith2_Delay_UpdateToEach.ToTestObservable(scheduler)); + + var sharedInformer = new SharedInformer(masterInformer, _log, x => x.Key, cache); + + var tcs = new TaskCompletionSource>>(); + // we attach after first subscription messages are established, but before any "watch" updates come in + scheduler.ScheduleAbsolute(50, async () => + { + scheduler.Stop(); + // need to pause virtual time briefly since the child subscriber runs on a separate thread and needs to be in position before we resume sending messages to the master + var _ = Task.Delay(10).ContinueWith(x => scheduler.Start()); + var second = await sharedInformer + .GetResource(ResourceStreamType.ListWatch) + .TimeoutIfNotDebugging() + .ToList(); + tcs.SetResult(second); + }); + + await sharedInformer + .GetResource(ResourceStreamType.ListWatch) + .TimeoutIfNotDebugging() + .ToList(); + + await masterInformer.Received(1).GetResource(ResourceStreamType.ListWatch); + } + + + [Theory] + [MemberData(nameof(GetTestScenarios))] + public async Task SecondSubscriber(string description, ScheduledEvent[] scenario, ResourceEvent[] expected) + { + _log.LogInformation("==============================================================================="); + _log.LogInformation(description); + var cache = new SimpleCache(); + var masterInformer = Substitute.For>(); + var scheduler = new TestScheduler(); + masterInformer.GetResource(ResourceStreamType.ListWatch) + .Returns(scenario.ToTestObservable(scheduler)); + + var sharedInformer = new SharedInformer(masterInformer, _log, x => x.Key, cache); + + var tcs = new TaskCompletionSource>>(); + // we attach after first subscription messages are established, but before any "watch" updates come in + scheduler.ScheduleAbsolute(50, async () => + { + scheduler.Stop(); + // need to pause virtual time briefly since the child subscriber runs on a separate thread and needs to be in position before we resume sending messages to the master + var pause = Task.Delay(10).ContinueWith(x => scheduler.Start()); + var second = await sharedInformer + .GetResource(ResourceStreamType.ListWatch) + .TimeoutIfNotDebugging() + .ToList(); + tcs.SetResult(second); + }); + + await sharedInformer + .GetResource(ResourceStreamType.ListWatch) + .TimeoutIfNotDebugging() + .ToList(); + + var secondResults = await tcs.Task; + + secondResults.Should().NotBeEmpty(); + secondResults.Should().BeEquivalentTo(expected); + } + + [Theory] + [MemberData(nameof(GetComputedTestScenarios))] + public async Task ComputedEvents(string description, ScheduledEvent[] scenario, ResourceEvent[] expected, IEqualityComparer comparer = null) + { + var masterInformer = Substitute.For>(); + masterInformer.GetResource(ResourceStreamType.ListWatch).Returns(scenario.ToTestObservable()); + 
_log.LogInformation("==============================================================================="); + _log.LogInformation(description); + var cache = new SimpleCache(); + + var sharedInformer = new SharedInformer(masterInformer, _log, x => x.Key, cache); + + var observable = sharedInformer + .GetResource(ResourceStreamType.ListWatch) + .ComputeMissedEventsBetweenResets(x => x.Key, comparer) + .TimeoutIfNotDebugging() + .ToList(); + var results = await observable; + + results.Should().NotBeEmpty(); + results.Should().BeEquivalentTo(expected); + } + + [Fact] + public async Task SubscribeAndUnsubscribe_WhenLastSubscriber_ClosesMasterConnection() + { + var cache = new SimpleCache(); + var masterInformer = Substitute.For>(); + var closeCalled = false; + var scheduler = new TestScheduler(); + masterInformer.GetResource(ResourceStreamType.ListWatch) + .Returns(Observable.Create>(observer => + new CompositeDisposable(TestData.Events + .EmptyReset_Delay_Add + .ToTestObservable(scheduler) + .TimeoutIfNotDebugging() + .Subscribe(observer), Disposable.Create(() => closeCalled = true)))); + var sharedInformer = new SharedInformer(masterInformer, _log, x => x.Key, cache); + + await sharedInformer + .GetResource(ResourceStreamType.ListWatch) + .TimeoutIfNotDebugging() + .ToList(); + + closeCalled.Should().BeTrue(); + } + } + +} diff --git a/tests/KubernetesClient.Tests/StreamDemuxerTests.cs b/tests/KubernetesClient.Tests/StreamDemuxerTests.cs index 3c3ff9a5d..268957e56 100644 --- a/tests/KubernetesClient.Tests/StreamDemuxerTests.cs +++ b/tests/KubernetesClient.Tests/StreamDemuxerTests.cs @@ -5,7 +5,7 @@ using System.Text; using System.Threading; using System.Threading.Tasks; -using k8s.tests.Mock; +using k8s.Tests.Mock; using Xunit; using Xunit.Abstractions; diff --git a/tests/KubernetesClient.Tests/TestData.cs b/tests/KubernetesClient.Tests/TestData.cs new file mode 100644 index 000000000..78b1e8ae3 --- /dev/null +++ b/tests/KubernetesClient.Tests/TestData.cs @@ -0,0 +1,156 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; +using System.Reflection; +using k8s.Informers.Notifications; +using k8s.Models; +using k8s.Tests.Utils; + +namespace k8s.Tests +{ + public static class TestData + { + public static class Events + { + public static List[]>> AllScenarios => + typeof(Events).GetProperties(BindingFlags.Public | BindingFlags.Static) + .Where(x => x.PropertyType == typeof(ScheduledEvent[])) + .Select(x => Tuple.Create(x.Name, (ScheduledEvent[])x.GetMethod.Invoke(null, null))) + .ToList(); + public static ScheduledEvent[] EmptyReset_Delay_Add => new [] + { + new ResourceEvent(EventTypeFlags.ResetEmpty, null).ScheduleFiring(0), + new TestResource(1).ToResourceEvent(EventTypeFlags.Add).ScheduleFiring(100) + }; + public static ScheduledEvent[] ResetWith2_Delay_UpdateToEach => new [] + { + new TestResource(1).ToResourceEvent(EventTypeFlags.ResetStart).ScheduleFiring(0), + new TestResource(2).ToResourceEvent(EventTypeFlags.ResetEnd).ScheduleFiring(0), + new TestResource(1).ToResourceEvent(EventTypeFlags.Modify).ScheduleFiring(100), + new TestResource(1).ToResourceEvent(EventTypeFlags.Modify).ScheduleFiring(200) + }; + + public static ScheduledEvent[] ResetWith2_Delay_UpdateBoth_Delay_Add1 => new [] + { + new TestResource(1).ToResourceEvent(EventTypeFlags.ResetStart).ScheduleFiring(0), + new TestResource(2).ToResourceEvent(EventTypeFlags.ResetEnd).ScheduleFiring(0), + new TestResource(1).ToResourceEvent(EventTypeFlags.Modify).ScheduleFiring(100), + new 
TestResource(1).ToResourceEvent(EventTypeFlags.Modify).ScheduleFiring(200), + new TestResource(3).ToResourceEvent(EventTypeFlags.Add).ScheduleFiring(400), + }; + + public static ScheduledEvent[] ResetWith2_Delay_ResetWith1One_ImplicitDeletion => new[] + { + new TestResource(1).ToResourceEvent(EventTypeFlags.ResetStart).ScheduleFiring(0), + new TestResource(2).ToResourceEvent(EventTypeFlags.ResetEnd).ScheduleFiring(0), + + new TestResource(1).ToResourceEvent(EventTypeFlags.ResetStart | EventTypeFlags.ResetEnd).ScheduleFiring(100), + }; + + public static ScheduledEvent[] ResetWith1_Delay_ResetWith2_ImplicitAddition => new[] + { + new TestResource(1).ToResourceEvent(EventTypeFlags.ResetStart | EventTypeFlags.ResetEnd).ScheduleFiring(0), + + new TestResource(1).ToResourceEvent(EventTypeFlags.ResetStart).ScheduleFiring(100), + new TestResource(2).ToResourceEvent(EventTypeFlags.ResetEnd).ScheduleFiring(100), + }; + public static ScheduledEvent[] ResetWith2_Delay_ResetWith2OneDifferentVersion_ImplicitUpdate => new[] + { + new TestResource(1, 1).ToResourceEvent(EventTypeFlags.ResetStart).ScheduleFiring(0), + new TestResource(2, 1).ToResourceEvent(EventTypeFlags.ResetEnd).ScheduleFiring(0), + + new TestResource(1, 1).ToResourceEvent(EventTypeFlags.ResetStart).ScheduleFiring(100), + new TestResource(2, 2).ToResourceEvent(EventTypeFlags.ResetEnd).ScheduleFiring(100), + }; + + public static ScheduledEvent[] Sync_Delay_SyncAndUpdate => new[] + { + new TestResource(1).ToResourceEvent(EventTypeFlags.Sync).ScheduleFiring(0), + + new TestResource(1).ToResourceEvent(EventTypeFlags.Sync).ScheduleFiring(100), + new TestResource(1).ToResourceEvent(EventTypeFlags.Modify).ScheduleFiring(120), + }; + } + public static V1Pod TestPod1ResourceVersion1 => new V1Pod() + { + Kind = V1Pod.KubeKind, + ApiVersion = V1Pod.KubeApiVersion, + Metadata = new V1ObjectMeta() + { + Name = "pod1", + ResourceVersion = "pod1V1" + } + }; + + + public static V1Pod TestPod1ResourceVersion2 => new V1Pod() + { + Kind = V1Pod.KubeKind, + ApiVersion = V1Pod.KubeApiVersion, + Metadata = new V1ObjectMeta() + { + Name = "pod1", + ResourceVersion = "pod1V2" + } + }; + public static V1Pod TestPod2ResourceVersion1 => new V1Pod() + { + Kind = V1Pod.KubeKind, + ApiVersion = V1Pod.KubeApiVersion, + Metadata = new V1ObjectMeta() + { + Name = "pod2", + ResourceVersion = "pod2V1" + } + }; + + public static V1Pod TestPod2ResourceVersion2 => new V1Pod() + { + Kind = V1Pod.KubeKind, + ApiVersion = V1Pod.KubeApiVersion, + Metadata = new V1ObjectMeta() + { + Name = "pod2", + ResourceVersion = "pod2V2" + } + }; + + public static V1PodList ListPodEmpty => new V1PodList() + { + Kind = V1PodList.KubeKind, + ApiVersion = V1PodList.KubeApiVersion, + Metadata = new V1ListMeta() + { + ResourceVersion = "podlistV1" + } + }; + public static V1PodList ListPodOneItem => new V1PodList() + { + Kind = V1PodList.KubeKind, + ApiVersion = V1PodList.KubeApiVersion, + Metadata = new V1ListMeta() + { + ResourceVersion = "podlistV2" + }, + Items = new List() + { + TestPod1ResourceVersion1 + } + }; + public static V1PodList ListPodsTwoItems => new V1PodList() + { + Kind = V1PodList.KubeKind, + ApiVersion = V1PodList.KubeApiVersion, + Metadata = new V1ListMeta() + { + ResourceVersion = "podlistV3" + }, + Items = new List() + { + TestPod1ResourceVersion1, + TestPod2ResourceVersion1 + } + }; + } +} diff --git a/tests/KubernetesClient.Tests/Utils/ScheduledEvent.cs b/tests/KubernetesClient.Tests/Utils/ScheduledEvent.cs new file mode 100644 index 000000000..5181225af --- /dev/null +++ 
b/tests/KubernetesClient.Tests/Utils/ScheduledEvent.cs @@ -0,0 +1,14 @@ +using k8s.Informers.Notifications; + +namespace k8s.Tests.Utils +{ + public struct ScheduledEvent + { + public ResourceEvent Event { get; set; } + public long ScheduledAt { get; set; } + public override string ToString() + { + return $"\n T{ScheduledAt}: {Event.ToString().Replace("\r\n",string.Empty).Trim()}"; + } + } +} diff --git a/tests/KubernetesClient.Tests/Utils/TestCompleteException.cs b/tests/KubernetesClient.Tests/Utils/TestCompleteException.cs new file mode 100644 index 000000000..04fcdc013 --- /dev/null +++ b/tests/KubernetesClient.Tests/Utils/TestCompleteException.cs @@ -0,0 +1,8 @@ +using System; +namespace k8s.Tests.Utils +{ + public class TestCompleteException : Exception + { + + } +} diff --git a/tests/KubernetesClient.Tests/Utils/TestExtensions.cs b/tests/KubernetesClient.Tests/Utils/TestExtensions.cs new file mode 100644 index 000000000..fd6a37607 --- /dev/null +++ b/tests/KubernetesClient.Tests/Utils/TestExtensions.cs @@ -0,0 +1,170 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using System.Net; +using System.Net.Http; +using System.Reactive.Linq; +using System.Threading; +using System.Threading.Tasks; +using k8s.Informers.Notifications; +using k8s.Models; +using k8s.Tests.Mock; +using Microsoft.Extensions.Logging; +using Microsoft.Rest; +using Newtonsoft.Json; +using Newtonsoft.Json.Converters; +using Microsoft.Reactive.Testing; +using System.Reactive.Concurrency; +using System.Reactive.Disposables; +using NSubstitute; + +namespace k8s.Tests.Utils +{ + public static class TestExtensions + { + private static TimeSpan DefaultTimeout = TimeSpan.FromSeconds(5); + + public static ScheduledEvent ScheduleFiring(this ResourceEvent obj, long fireAt) + { + return new ScheduledEvent {Event = obj, ScheduledAt = fireAt}; + } + + public static IObservable TimeoutIfNotDebugging(this IObservable source) => + source.TimeoutIfNotDebugging(DefaultTimeout); + + public static IObservable TimeoutIfNotDebugging(this IObservable source, TimeSpan timeout) => + Debugger.IsAttached ? 
source : source.Timeout(timeout); + + public static async Task TimeoutIfNotDebugging(this Task task) => await task.TimeoutIfNotDebugging(DefaultTimeout); + public static async Task TimeoutIfNotDebugging(this Task task, TimeSpan timeout) + { + async Task Wrapper() + { + await task; + return true; + } + await Wrapper().TimeoutIfNotDebugging(timeout); + } + + public static async Task TimeoutIfNotDebugging(this Task task) => await task.TimeoutIfNotDebugging(DefaultTimeout); + + public static async Task TimeoutIfNotDebugging(this Task task, TimeSpan timeout) + { + if (Debugger.IsAttached) + { + return await task; + } + + using var timeoutCancellationTokenSource = new CancellationTokenSource(); + var completedTask = await Task.WhenAny(task, Task.Delay(timeout, timeoutCancellationTokenSource.Token)); + if (completedTask == task) + { + timeoutCancellationTokenSource.Cancel(); + return await task; // Very important in order to propagate exceptions + } + + throw new TimeoutException("The operation has timed out."); + } + + public static string ToJson(this object obj, Formatting formatting = Formatting.None) + { + return JsonConvert.SerializeObject(obj, formatting, new StringEnumConverter()); + } + + public static Watcher.WatchEvent ToWatchEvent(this T obj, WatchEventType eventType) + { + return new Watcher.WatchEvent {Type = eventType, Object = obj}; + } + + public static HttpOperationResponse> ToHttpOperationResponse(this TL obj) where TL : IItems where TV : IKubernetesObject + { + return new HttpOperationResponse>() + { + Body = JsonConvert.DeserializeObject>(obj.ToJson()), + Response = new HttpResponseMessage() + { + StatusCode = HttpStatusCode.OK, + Content = new StringContent(obj.ToJson()) + + } + }; + } + + public static HttpOperationResponse> ToHttpOperationResponse(this Watcher.WatchEvent obj) where T : IKubernetesObject + { + return new[] {obj}.ToHttpOperationResponse(); + } + + public static HttpOperationResponse> ToHttpOperationResponse(this IEnumerable.WatchEvent> obj) where T : IKubernetesObject + { + var stringContent = new StringContent(string.Join("\n",obj.Select(x => x.ToJson()))); + var lineContent = new WatcherDelegatingHandler.LineSeparatedHttpContent(stringContent, CancellationToken.None); + lineContent.LoadIntoBufferAsync().Wait(); + var httpResponse = new HttpOperationResponse>() + { + Response = new HttpResponseMessage() + { + StatusCode = HttpStatusCode.OK, + Content = lineContent + } + }; + return httpResponse; + } + + public static IObservable> ToTestObservable(this ICollection> source, TestScheduler testScheduler = null, bool startOnSubscribe = true, ILogger logger = null) + { + if(testScheduler == null) + testScheduler = new TestScheduler(); + return Observable.Create>(o => + { + var closeAt = source.Max(x => x.ScheduledAt); + foreach (var e in source ) + { + testScheduler.ScheduleAbsolute(e.ScheduledAt, () => o.OnNext(e.Event)); + } + testScheduler.ScheduleAbsolute(closeAt, async () => + { + logger?.LogTrace("Test sequence is complete"); + // this is a bit of a hack but since in some tests observable is connected to TPL blocks which don't + // run on virtual scheduler, the timings come off and introduce race condition. 
this ensures that all messages are + // accepted by the receiving block before it is marked for completion + await Task.Delay(10); + o.OnCompleted(); + }); + if(startOnSubscribe) + testScheduler.Start(); + return Disposable.Empty; + }); + } + + public static ResourceEvent[] ToBasicExpected(this IEnumerable> events) + { + var lastKnown = new Dictionary(); + var retval = new List>(); + foreach (var e in events.Select(x => x.Event)) + { + var item = e; + if (e.EventFlags.HasFlag(EventTypeFlags.Modify) && lastKnown.TryGetValue(e.Value.Key, out var oldValue)) + { + item = new ResourceEvent(e.EventFlags, e.Value, oldValue); + } + + if (e.EventFlags.HasFlag(EventTypeFlags.Delete)) + { + lastKnown.Remove(e.Value.Key); + } + else + { + if (e.Value != null) + lastKnown[e.Value.Key] = e.Value; + } + + retval.Add(item); + } + + return retval.ToArray(); + } + } +} + diff --git a/tests/KubernetesClient.Tests/Utils/TestResource.cs b/tests/KubernetesClient.Tests/Utils/TestResource.cs new file mode 100644 index 000000000..32f1e30c3 --- /dev/null +++ b/tests/KubernetesClient.Tests/Utils/TestResource.cs @@ -0,0 +1,73 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; + +namespace k8s.Tests.Utils +{ + [DebuggerStepThrough] + public class TestResource + { + private sealed class KeyVersionEqualityComparer : IEqualityComparer + { + public bool Equals(TestResource x, TestResource y) + { + if (ReferenceEquals(x, y)) return true; + if (ReferenceEquals(x, null)) return false; + if (ReferenceEquals(y, null)) return false; + if (x.GetType() != y.GetType()) return false; + return x.Key == y.Key && x.Version == y.Version; + } + + public int GetHashCode(TestResource obj) + { + unchecked + { + return (obj.Key * 397) ^ obj.Version; + } + } + } + + public static IEqualityComparer KeyVersionComparer { get; } = new KeyVersionEqualityComparer(); + + protected bool Equals(TestResource other) + { + return Value == other.Value && Key == other.Key && Version == other.Version; + } + + public override bool Equals(object obj) + { + if (ReferenceEquals(null, obj)) return false; + if (ReferenceEquals(this, obj)) return true; + if (obj.GetType() != this.GetType()) return false; + return Equals((TestResource) obj); + } + + public override int GetHashCode() + { + unchecked + { + var hashCode = (Value != null ? 
Value.GetHashCode() : 0); + hashCode = (hashCode * 397) ^ Key; + hashCode = (hashCode * 397) ^ Version; + return hashCode; + } + } + + + public TestResource(int key, int version = 1, string value = "test") + { + Value = value; + Version = version; + Key = key; + } + + public string Value { get; } + public int Key { get; } + public int Version { get; } + + public override string ToString() + { + return $"{nameof(Key)}: {Key}, {nameof(Value)}: {Value}, {nameof(Version)} {Version}"; + } + } +} diff --git a/tests/KubernetesClient.Tests/Utils/XunitLogger.cs b/tests/KubernetesClient.Tests/Utils/XunitLogger.cs new file mode 100644 index 000000000..0afaddbb8 --- /dev/null +++ b/tests/KubernetesClient.Tests/Utils/XunitLogger.cs @@ -0,0 +1,37 @@ +using System; +using System.Threading; +using Microsoft.Extensions.Logging; +using Xunit.Abstractions; + +namespace k8s.Tests.Utils +{ + public class XunitLogger : ILogger, IDisposable + { + private ITestOutputHelper _output; + + public XunitLogger(ITestOutputHelper output) + { + _output = output; + } + public void Log(LogLevel logLevel, EventId eventId, TState state, Exception exception, Func formatter) + { + _output.WriteLine($"{DateTime.Now.Ticks - Current} | {state} | ThreadID: {Thread.CurrentThread.ManagedThreadId}"); + } + + private static long Current = DateTime.Now.Ticks; + + public bool IsEnabled(LogLevel logLevel) + { + return true; + } + + public IDisposable BeginScope(TState state) + { + return this; + } + + public void Dispose() + { + } + } +} diff --git a/tests/KubernetesClient.Tests/VersionPartitionedSharedCacheTests.cs b/tests/KubernetesClient.Tests/VersionPartitionedSharedCacheTests.cs new file mode 100644 index 000000000..61913e535 --- /dev/null +++ b/tests/KubernetesClient.Tests/VersionPartitionedSharedCacheTests.cs @@ -0,0 +1,161 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using FluentAssertions; +using k8s.Informers.Cache; +using k8s.Tests.Utils; +using Xunit; + +namespace k8s.Tests +{ + public class VersionPartitionedSharedCacheTests + { + private readonly VersionPartitionedSharedCache _sut; + + public VersionPartitionedSharedCacheTests() + { + _sut = new VersionPartitionedSharedCache(x => x.Key, x => x.Version); + } + + [Fact] + public void AddItem_WhenExistingItemInOtherPartitions_ReuseExistingValue() + { + var resourceV1 = new TestResource(1,1); + var otherResourceV1 = new TestResource(1,1); + var partition1 = _sut.CreatePartition(); + var partition2 = _sut.CreatePartition(); + partition1.Add(resourceV1.Key, resourceV1); + partition2.Add(resourceV1.Key, otherResourceV1); + _sut.Items.Should().HaveCount(1); + partition1.Should().HaveCount(1); + partition2.Should().HaveCount(1); + partition2[1].Should().BeSameAs(resourceV1); + } + + [Fact] + public void AddItem_WhenMultiplePartitions_OtherPartitionsNotAffected() + { + var resourceV1 = new TestResource(1,1); + var partition1 = _sut.CreatePartition(); + var partition2 = _sut.CreatePartition(); + partition1.Add(resourceV1.Key, resourceV1); + partition1.Should().HaveCount(1); + partition1.Values.First().Should().BeSameAs(resourceV1); + partition2.Should().BeEmpty(); + } + [Fact] + public void AddItem_WhenResourceExistsWithDifferentVersion_AddAsNew() + { + var resourceV1 = new TestResource(1,1); + var resourceV2 = new TestResource(1,2); + + var partition1 = _sut.CreatePartition(); + var partition2 = _sut.CreatePartition(); + partition1.Add(resourceV1.Key, resourceV1); + partition2.Add(resourceV2.Key, resourceV2); + + _sut.Items.Should().HaveCount(2); + 
partition1.Should().HaveCount(1); + partition1.Should().Contain(KeyValuePair.Create(1, resourceV1)); + partition2.Should().HaveCount(1); + partition2.Should().Contain(KeyValuePair.Create(1, resourceV2)); + partition1[1].Version.Should().NotBe(partition2[1].Version); + } + + [Fact] + public void RemoveItem_WhenResourceExistsWithSameVersionInOtherPartitions_RemoveWithoutAffectingOtherPartitions() + { + var resourceV1 = new TestResource(1,1); + + var partition1 = _sut.CreatePartition(); + var partition2 = _sut.CreatePartition(); + partition1.Add(resourceV1.Key, resourceV1); + partition2.Add(resourceV1.Key, resourceV1); + partition2.Remove(resourceV1.Key); + + _sut.Items.Should().HaveCount(1); + partition1.Should().HaveCount(1); + partition2.Should().BeEmpty(); + } + + + [Fact] + public void RemoveItem_WhenNoOtherPartitionsTrackingRemovedItem_RemovedFromSharedList() + { + var resourceV1 = new TestResource(1,1); + + var partition1 = _sut.CreatePartition(); + var partition2 = _sut.CreatePartition(); + partition1.Add(resourceV1.Key, resourceV1); + partition2.Add(resourceV1.Key, resourceV1); + partition1.Remove(resourceV1.Key); + partition2.Remove(resourceV1.Key); + + partition1.Should().BeEmpty(); + partition2.Should().BeEmpty(); + _sut.Items.Should().BeEmpty(); + } + + [Fact] + public void Clear_WhenResourceExistsWithSameVersionInOtherPartitions_RemoveWithoutAffectingOtherPartitions() + { + var resource1V1 = new TestResource(1,1); + var resource2V1 = new TestResource(2,1); + + var partition1 = _sut.CreatePartition(); + var partition2 = _sut.CreatePartition(); + partition1.Add(resource1V1.Key, resource1V1); + partition2.Add(resource1V1.Key, resource1V1); + partition2.Add(resource2V1.Key, resource2V1); + partition2.Clear(); + + _sut.Items.Should().HaveCount(1); + partition1.Should().HaveCount(1); + partition2.Should().BeEmpty(); + } + + + [Fact] + public void SetIndexer_WhenNotInSharedList_AddToSharedList() + { + var resourceV1 = new TestResource(1,1); + + var partition1 = _sut.CreatePartition(); + partition1[resourceV1.Key] = resourceV1; + + partition1.Should().HaveCount(1); + _sut.Items.Should().HaveCount(1); + } + [Fact] + public void SetIndexer_WhenItemAlreadyExistsInShared_DontAddReuseExisting() + { + var resourceV1 = new TestResource(1,1); + var otherResourceV1 = new TestResource(1,1); + + var partition1 = _sut.CreatePartition(); + var partition2 = _sut.CreatePartition(); + partition1[1] = resourceV1; + partition2[1] = otherResourceV1; + + partition1.Should().HaveCount(1); + partition2.Should().HaveCount(1); + partition1.Values.First().Should().BeSameAs(resourceV1); + partition2.Values.First().Should().BeSameAs(resourceV1); + } + + [Fact] + public void Add_WhenKeyNotMatchKeyInResource_Throws() + { + var resource1V1 = new TestResource(1,1); + + var partition = _sut.CreatePartition(); + Action act1 = () => partition.Add(2, resource1V1); + Action act2 = () => partition.Add(KeyValuePair.Create(2, resource1V1)); + Action act3 = () => partition[2] = resource1V1; + + act1.Should().Throw(); + act2.Should().Throw(); + act3.Should().Throw(); + } + } +} diff --git a/tests/KubernetesClient.Tests/WatchTests.cs b/tests/KubernetesClient.Tests/WatchTests.cs index a1145a406..7c778d441 100644 --- a/tests/KubernetesClient.Tests/WatchTests.cs +++ b/tests/KubernetesClient.Tests/WatchTests.cs @@ -92,16 +92,11 @@ await Assert.ThrowsAnyAsync(() => [Fact] public async Task AsyncWatcher() { - var created = new AsyncManualResetEvent(false); var eventsReceived = new AsyncManualResetEvent(false); - using (var server = 
new MockKubeApiServer(testOutput, async httpContext => - { - // block until reponse watcher obj created - await created.WaitAsync(); - await WriteStreamLine(httpContext, MockAddedEventStreamLine); - return false; - })) + var replayer = new MockKubeApiServer.ResponsePlayer(MockAddedEventStreamLine.Replace("\r\n","")); + + using (var server = new MockKubeApiServer(testOutput, replayer)) { var client = new Kubernetes(new KubernetesClientConfiguration { @@ -116,12 +111,11 @@ public async Task AsyncWatcher() })) { // here watcher is ready to use, but http server has not responsed yet. - created.Set(); + replayer.SendNextResponse(); await Task.WhenAny(eventsReceived.WaitAsync(), Task.Delay(TestTimeout)); } Assert.True(eventsReceived.IsSet); - Assert.True(created.IsSet); } } @@ -364,7 +358,8 @@ public async Task WatchEventsWithTimeout() { var client = new Kubernetes(new KubernetesClientConfiguration { - Host = server.Uri.ToString() + Host = server.Uri.ToString(), + // Timeout = TimeSpan.FromSeconds(3) }); var listTask = await client.ListNamespacedPodWithHttpMessagesAsync("default", watch: true); From 4dd31b23fd353d08f8ca49428028404140f60808 Mon Sep 17 00:00:00 2001 From: Andrew Stakhov Date: Wed, 1 Apr 2020 10:46:34 -0400 Subject: [PATCH 2/3] Bump up build sdk --- .github/workflows/dotnet.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dotnet.yaml b/.github/workflows/dotnet.yaml index c2822ac8d..70bee30ec 100644 --- a/.github/workflows/dotnet.yaml +++ b/.github/workflows/dotnet.yaml @@ -14,7 +14,7 @@ jobs: - name: Setup dotnet uses: actions/setup-dotnet@v1 with: - dotnet-version: 2.2.108 + dotnet-version: 3.1.200 - run: dotnet build --configuration Release - name: test with dotnet run: dotnet test /p:CollectCoverage=true /p:ExcludeByFile=\"**/KubernetesClient/generated/**/*.cs\" /p:CoverletOutputFormat="cobertura" From dd10ff37a87a5a72391701e331877f7d6f33f83b Mon Sep 17 00:00:00 2001 From: Andrew Stakhov Date: Wed, 1 Apr 2020 11:05:03 -0400 Subject: [PATCH 3/3] Bump up nuget workflow version --- .github/workflows/nuget.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/nuget.yaml b/.github/workflows/nuget.yaml index 4e62c6b05..5bd252562 100644 --- a/.github/workflows/nuget.yaml +++ b/.github/workflows/nuget.yaml @@ -14,7 +14,7 @@ jobs: - name: Setup .NET Core uses: actions/setup-dotnet@v1 with: - dotnet-version: 2.2.108 + dotnet-version: 3.1.200 - name: test with dotnet run: dotnet test - name: pack