func newTestManager() *manager {
	m := NewManager(
		status.NewManager(&testclient.Fake{}, kubepod.NewBasicPodManager(nil)),
		results.NewManager(),
		results.NewManager(),
		nil, // runner
		kubecontainer.NewRefManager(),
		&record.FakeRecorder{},
	).(*manager)
	// Don't actually execute probes.
	m.prober.exec = fakeExecProber{probe.Success, nil}
	return m
}
func newTestManager() *manager {
	const probePeriod = 1
	m := NewManager(
		probePeriod,
		status.NewManager(&testclient.Fake{}),
		results.NewManager(),
		results.NewManager(),
		nil, // runner
		kubecontainer.NewRefManager(),
		&record.FakeRecorder{},
	).(*manager)
	// Don't actually execute probes.
	m.prober.exec = fakeExecProber{probe.Success, nil}
	return m
}
// NewFakeKubeRuntimeManager returns a kubeGenericRuntimeManager wired up with
// fake dependencies for use in tests.
func NewFakeKubeRuntimeManager(runtimeService internalApi.RuntimeService, imageService internalApi.ImageManagerService, networkPlugin network.NetworkPlugin, osInterface kubecontainer.OSInterface) (*kubeGenericRuntimeManager, error) {
	recorder := &record.FakeRecorder{}
	kubeRuntimeManager := &kubeGenericRuntimeManager{
		recorder:            recorder,
		cpuCFSQuota:         false,
		livenessManager:     proberesults.NewManager(),
		containerRefManager: kubecontainer.NewRefManager(),
		osInterface:         osInterface,
		networkPlugin:       networkPlugin,
		runtimeHelper:       &fakeRuntimeHelper{},
		runtimeService:      runtimeService,
		imageService:        imageService,
		keyring:             credentialprovider.NewDockerKeyring(),
	}

	typedVersion, err := runtimeService.Version(kubeRuntimeAPIVersion)
	if err != nil {
		return nil, err
	}

	kubeRuntimeManager.containerGC = NewContainerGC(runtimeService, newFakePodGetter(), kubeRuntimeManager)
	kubeRuntimeManager.runtimeName = typedVersion.GetRuntimeName()
	kubeRuntimeManager.imagePuller = images.NewImageManager(
		kubecontainer.FilterEventRecorder(recorder),
		kubeRuntimeManager,
		flowcontrol.NewBackOff(time.Second, 300*time.Second),
		false)
	kubeRuntimeManager.runner = lifecycle.NewHandlerRunner(
		&fakeHTTP{},
		kubeRuntimeManager,
		kubeRuntimeManager)

	return kubeRuntimeManager, nil
}
// NewManager creates a Manager for pod probing.
func NewManager(
	defaultProbePeriod time.Duration,
	statusManager status.Manager,
	prober Prober) Manager {
	return &manager{
		defaultProbePeriod: defaultProbePeriod,
		statusManager:      statusManager,
		prober:             prober,
		readinessCache:     results.NewManager(),
		readinessProbes:    make(map[containerPath]*worker),
	}
}
func newTestManager() *manager {
	refManager := kubecontainer.NewRefManager()
	refManager.SetRef(testContainerID, &api.ObjectReference{}) // Suppress prober warnings.
	m := NewManager(
		status.NewManager(&testclient.Fake{}, kubepod.NewBasicPodManager(nil)),
		results.NewManager(),
		nil, // runner
		refManager,
		&record.FakeRecorder{},
	).(*manager)
	// Don't actually execute probes.
	m.prober.exec = fakeExecProber{probe.Success, nil}
	return m
}
func newTestManager() *manager {
	refManager := kubecontainer.NewRefManager()
	refManager.SetRef(testContainerID, &v1.ObjectReference{}) // Suppress prober warnings.
	podManager := kubepod.NewBasicPodManager(nil)
	// Add test pod to pod manager, so that status manager can get the pod from pod manager if needed.
	podManager.AddPod(getTestPod())
	m := NewManager(
		status.NewManager(&fake.Clientset{}, podManager),
		results.NewManager(),
		nil, // runner
		refManager,
		&record.FakeRecorder{},
	).(*manager)
	// Don't actually execute probes.
	m.prober.exec = fakeExecProber{probe.Success, nil}
	return m
}
// NewManager creates a Manager for pod probing.
func NewManager(
	statusManager status.Manager,
	livenessManager results.Manager,
	runner kubecontainer.ContainerCommandRunner,
	refManager *kubecontainer.RefManager,
	recorder record.EventRecorder) Manager {
	prober := newProber(runner, refManager, recorder)
	readinessManager := results.NewManager()
	return &manager{
		statusManager:    statusManager,
		prober:           prober,
		readinessManager: readinessManager,
		livenessManager:  livenessManager,
		workers:          make(map[probeKey]*worker),
	}
}
func newTestDockerManager() (*dockertools.DockerManager, *dockertools.FakeDockerClient) { fakeDocker := dockertools.NewFakeDockerClient() fakeRecorder := &record.FakeRecorder{} containerRefManager := kubecontainer.NewRefManager() networkPlugin, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil)) dockerManager := dockertools.NewFakeDockerManager( fakeDocker, fakeRecorder, proberesults.NewManager(), containerRefManager, &cadvisorapi.MachineInfo{}, dockertools.PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, networkPlugin, nil, nil, nil) return dockerManager, fakeDocker }
func newTestDockerManager() (*dockertools.DockerManager, *dockertools.FakeDockerClient) {
	fakeDocker := &dockertools.FakeDockerClient{VersionInfo: docker.Env{"Version=1.1.3", "ApiVersion=1.15"}, Errors: make(map[string]error), RemovedImages: sets.String{}}
	fakeRecorder := &record.FakeRecorder{}
	containerRefManager := kubecontainer.NewRefManager()
	networkPlugin, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	dockerManager := dockertools.NewFakeDockerManager(
		fakeDocker,
		fakeRecorder,
		proberesults.NewManager(),
		containerRefManager,
		&cadvisorapi.MachineInfo{},
		dockertools.PodInfraContainerImage,
		0, 0, "",
		kubecontainer.FakeOS{},
		networkPlugin,
		nil,
		nil,
		nil)

	return dockerManager, fakeDocker
}
func newTestDockerManager() (*dockertools.DockerManager, *dockertools.FakeDockerClient) { fakeDocker := dockertools.NewFakeDockerClient() fakeRecorder := &record.FakeRecorder{} containerRefManager := kubecontainer.NewRefManager() networkPlugin, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone) dockerManager := dockertools.NewFakeDockerManager( fakeDocker, fakeRecorder, proberesults.NewManager(), containerRefManager, &cadvisorapi.MachineInfo{}, options.GetDefaultPodInfraContainerImage(), 0, 0, "", &containertest.FakeOS{}, networkPlugin, nil, nil, nil) return dockerManager, fakeDocker }
func newTestDockerManagerWithHTTPClient(fakeHTTPClient *fakeHTTP) (*DockerManager, *FakeDockerClient) {
	fakeDocker := NewFakeDockerClient()
	fakeRecorder := &record.FakeRecorder{}
	containerRefManager := kubecontainer.NewRefManager()
	networkPlugin, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	dockerManager := NewFakeDockerManager(
		fakeDocker,
		fakeRecorder,
		proberesults.NewManager(),
		containerRefManager,
		&cadvisorapi.MachineInfo{},
		kubetypes.PodInfraContainerImage,
		0, 0, "",
		kubecontainer.FakeOS{},
		networkPlugin,
		&fakeRuntimeHelper{},
		fakeHTTPClient,
		util.NewBackOff(time.Second, 300*time.Second))

	return dockerManager, fakeDocker
}
// NewManager creates a Manager for pod probing and starts the readiness sync loop.
func NewManager(
	statusManager status.Manager,
	livenessManager results.Manager,
	runner kubecontainer.ContainerCommandRunner,
	refManager *kubecontainer.RefManager,
	recorder record.EventRecorder) Manager {
	prober := newProber(runner, refManager, recorder)
	readinessManager := results.NewManager()
	m := &manager{
		statusManager:    statusManager,
		prober:           prober,
		readinessManager: readinessManager,
		livenessManager:  livenessManager,
		workers:          make(map[probeKey]*worker),
	}

	// Start syncing readiness.
	go util.Forever(m.updateReadiness, 0)

	return m
}
// NewFakeKubeRuntimeManager returns a kubeGenericRuntimeManager backed by fake
// network, OS, and runtime helpers for testing.
func NewFakeKubeRuntimeManager(runtimeService internalApi.RuntimeService, imageService internalApi.ImageManagerService) (*kubeGenericRuntimeManager, error) {
	networkPlugin, _ := network.InitNetworkPlugin(
		[]network.NetworkPlugin{},
		"",
		nettest.NewFakeHost(nil),
		componentconfig.HairpinNone,
		"10.0.0.0/8",
	)

	return NewKubeGenericRuntimeManager(
		&record.FakeRecorder{},
		proberesults.NewManager(),
		kubecontainer.NewRefManager(),
		&containertest.FakeOS{},
		networkPlugin,
		&fakeRuntimeHelper{},
		&fakeHTTP{},
		flowcontrol.NewBackOff(time.Second, 300*time.Second),
		false,
		false,
		runtimeService,
		imageService,
	)
}