Example #1
// newQuotaEvaluator configures an admission controller that can enforce quota constraints
// using the provided registry. The registry must be able to handle the group/kinds
// persisted by the server this admission controller intercepts.
func newQuotaEvaluator(client clientset.Interface, registry quota.Registry) (*quotaEvaluator, error) {
	liveLookupCache, err := lru.New(100)
	if err != nil {
		return nil, err
	}
	updatedCache, err := lru.New(100)
	if err != nil {
		return nil, err
	}
	lw := &cache.ListWatch{
		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			return client.Core().ResourceQuotas(api.NamespaceAll).List(options)
		},
		WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			return client.Core().ResourceQuotas(api.NamespaceAll).Watch(options)
		},
	}
	indexer, reflector := cache.NewNamespaceKeyedIndexerAndReflector(lw, &api.ResourceQuota{}, 0)

	reflector.Run()
	return &quotaEvaluator{
		client:          client,
		indexer:         indexer,
		registry:        registry,
		liveLookupCache: liveLookupCache,
		liveTTL:         30 * time.Second,
		updatedQuotas:   updatedCache,

		queue:      workqueue.New(),
		work:       map[string][]*admissionWaiter{},
		dirtyWork:  map[string][]*admissionWaiter{},
		inProgress: sets.String{},
	}, nil
}
// newQuotaAccessor creates an object that implements the QuotaAccessor interface, used to retrieve quota objects.
func newQuotaAccessor(client clientset.Interface) (*quotaAccessor, error) {
	liveLookupCache, err := lru.New(100)
	if err != nil {
		return nil, err
	}
	updatedCache, err := lru.New(100)
	if err != nil {
		return nil, err
	}
	lw := &cache.ListWatch{
		ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
			internalOptions := api.ListOptions{}
			v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
			return client.Core().ResourceQuotas(api.NamespaceAll).List(internalOptions)
		},
		WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
			internalOptions := api.ListOptions{}
			v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
			return client.Core().ResourceQuotas(api.NamespaceAll).Watch(internalOptions)
		},
	}
	indexer, reflector := cache.NewNamespaceKeyedIndexerAndReflector(lw, &api.ResourceQuota{}, 0)

	return &quotaAccessor{
		client:          client,
		indexer:         indexer,
		reflector:       reflector,
		liveLookupCache: liveLookupCache,
		liveTTL:         30 * time.Second,
		updatedQuotas:   updatedCache,
	}, nil
}
Example #3
func main() {
	parse_flags()

	var err error
	if enable_cache {
		// create cache
		dns_cache, err = lru.New(1000)
		if err != nil {
			log.Fatal(err)
		}
	}

	dns.HandleFunc(".", handleRoot)

	logger = NewLogger(logfile, debug)

	logger.Info("Listen on %s\n", bind_addr)

	go func() {
		/* listen tcp */
		err := dns.ListenAndServe(bind_addr, "tcp", nil)
		if err != nil {
			log.Fatal(err)
		}
	}()

	/* listen udp */
	err = dns.ListenAndServe(bind_addr, "udp", nil)
	if err != nil {
		log.Fatal(err)
	}
}
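The handleRoot handler registered above is not part of the snippet. A minimal sketch of what a cache-backed handler could look like with miekg/dns, assuming a hypothetical upstream resolver at 8.8.8.8:53 and the first question as the cache key:

func handleRoot(w dns.ResponseWriter, r *dns.Msg) {
	key := r.Question[0].String() // assumed cache key: name, type and class
	if enable_cache {
		if v, ok := dns_cache.Get(key); ok {
			cached := v.(*dns.Msg).Copy()
			cached.Id = r.Id // the response ID must match the query ID
			w.WriteMsg(cached)
			return
		}
	}
	// forward to the assumed upstream resolver and cache the answer
	resp, err := dns.Exchange(r, "8.8.8.8:53")
	if err != nil {
		dns.HandleFailed(w, r)
		return
	}
	if enable_cache {
		dns_cache.Add(key, resp)
	}
	w.WriteMsg(resp)
}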
Example #4
// NewKeyCacheStandard constructs a new KeyCacheStandard with the given
// cache capacity.
func NewKeyCacheStandard(capacity int) *KeyCacheStandard {
	head, err := lru.New(capacity)
	if err != nil {
		panic(err.Error())
	}
	return &KeyCacheStandard{head}
}
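The panic above is safe in practice: in hashicorp/golang-lru, lru.New only returns an error for a non-positive size, so a caller-supplied positive capacity can never trip it. A minimal, self-contained sketch of the cache API all of these examples build on:

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	cache, err := lru.New(128) // fails only for sizes <= 0
	if err != nil {
		panic(err)
	}
	cache.Add("answer", 42) // inserting beyond capacity evicts the least recently used entry
	if v, ok := cache.Get("answer"); ok {
		fmt.Println(v.(int)) // values come back as interface{} and must be type-asserted
	}
}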
Example #5
// NewLimitRanger returns an object that enforces limits based on the supplied limit function
func NewLimitRanger(client clientset.Interface, actions LimitRangerActions) (admission.Interface, error) {
	liveLookupCache, err := lru.New(10000)
	if err != nil {
		return nil, err
	}

	lw := &cache.ListWatch{
		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			return client.Core().LimitRanges(api.NamespaceAll).List(options)
		},
		WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			return client.Core().LimitRanges(api.NamespaceAll).Watch(options)
		},
	}
	indexer, reflector := cache.NewNamespaceKeyedIndexerAndReflector(lw, &api.LimitRange{}, 0)
	reflector.Run()

	if actions == nil {
		actions = &DefaultLimitRangerActions{}
	}

	return &limitRanger{
		Handler:         admission.NewHandler(admission.Create, admission.Update),
		client:          client,
		actions:         actions,
		indexer:         indexer,
		liveLookupCache: liveLookupCache,
		liveTTL:         30 * time.Second,
	}, nil
}
Example #6
func NewPointInPolygon(source string, cache_size int, cache_trigger int, logger *log.WOFLogger) (*WOFPointInPolygon, error) {

	rtree := rtreego.NewTree(2, 25, 50)

	cache, err := lru.New(cache_size)

	if err != nil {
		return nil, err
	}

	metrics := NewPointInPolygonMetrics()

	placetypes := make(map[string]int)

	pip := WOFPointInPolygon{
		Rtree:        rtree,
		Source:       source,
		Cache:        cache,
		CacheSize:    cache_size,
		CacheTrigger: cache_trigger,
		Placetypes:   placetypes,
		Metrics:      metrics,
		Logger:       logger,
	}

	return &pip, nil
}
Example #7
// NewMDCacheStandard constructs a new MDCacheStandard using the given
// cache capacity.
func NewMDCacheStandard(capacity int) *MDCacheStandard {
	tmp, err := lru.New(capacity)
	if err != nil {
		// lru.New fails only for a non-positive capacity; misuse surfaces as a nil cache
		return nil
	}
	return &MDCacheStandard{tmp}
}
// NewDatastore constructs a new LRU Datastore with the given capacity.
func NewDatastore(capacity int) (*Datastore, error) {
	cache, err := lru.New(capacity)
	if err != nil {
		return nil, err
	}

	return &Datastore{cache: cache}, nil
}
Example #9
func newFlowCache() *flowCache {
	c, err := lru.New(8192)
	if err != nil {
		panic(fmt.Sprintf("LRU flow cache: %v", err))
	}

	return &flowCache{
		cache: c,
	}
}
Example #10
func NewCache(limit int, ttl time.Duration) *Cache {
	var cache *lru.Cache
	if 0 < limit {
		var err error
		cache, err = lru.New(limit)
		if err != nil {
			panic(err)
		}
	}
	return &Cache{cache: cache, TTL: ttl}
}
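Construction is all this snippet shows; how the TTL is enforced is left to the accessors. One plausible shape (hypothetical, not from the original) is to store timestamped entries and treat expired hits as misses:

type entry struct {
	value   interface{}
	expires time.Time
}

func (c *Cache) Set(key string, value interface{}) {
	if c.cache == nil { // a limit <= 0 disables caching entirely
		return
	}
	c.cache.Add(key, entry{value: value, expires: time.Now().Add(c.TTL)})
}

func (c *Cache) Get(key string) (interface{}, bool) {
	if c.cache == nil {
		return nil, false
	}
	v, ok := c.cache.Get(key)
	if !ok {
		return nil, false
	}
	e := v.(entry)
	if time.Now().After(e.expires) { // expired entries count as misses
		c.cache.Remove(key)
		return nil, false
	}
	return e.value, true
}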
Example #11
// NewStatusAdmitter creates a plugin wrapper that ensures every accepted
// route has a status field set that matches this router. The admitter manages
// an LRU of recently seen conflicting updates to handle when two router processes
// with differing configurations are writing updates at the same time.
func NewStatusAdmitter(plugin router.Plugin, client client.RoutesNamespacer, name string) *StatusAdmitter {
	expected, _ := lru.New(1024)
	return &StatusAdmitter{
		plugin:     plugin,
		client:     client,
		routerName: name,

		contentionInterval: 1 * time.Minute,
		expected:           expected,
	}
}
Example #12
// NewLifecycle creates a new namespace lifecycle admission control handler
func NewLifecycle(c clientset.Interface, immortalNamespaces sets.String) (admission.Interface, error) {
	forceLiveLookupCache, err := lru.New(100)
	if err != nil {
		// lru.New fails only for a non-positive size; propagate rather than panic
		return nil, err
	}
	return &lifecycle{
		Handler:              admission.NewHandler(admission.Create, admission.Update, admission.Delete),
		client:               c,
		immortalNamespaces:   immortalNamespaces,
		forceLiveLookupCache: forceLiveLookupCache,
	}, nil
}
Example #13
func (pc *ProofCache) setup() error {
	pc.Lock()
	defer pc.Unlock()
	if pc.lru != nil {
		return nil
	}
	lru, err := lru.New(pc.capac)
	if err != nil {
		return err
	}
	pc.lru = lru
	return nil
}
Example #14
func (cs lrustore) Put(key string, value interface{}) {
	// keys are expected to carry a PREFIX_LEN-byte shard prefix; shorter keys would panic here
	prefix, key := key[:PREFIX_LEN], key[PREFIX_LEN:]
	mp, ok := cs[prefix]
	if !ok {
		var err error
		mp, err = lru.New(10000)
		if err != nil {
			return
		}
		cs[prefix] = mp
	}
	mp.Add(key, value)
}
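A matching lookup would mirror Put's prefix sharding; a sketch (the original only shows Put):

func (cs lrustore) Get(key string) (interface{}, bool) {
	prefix, key := key[:PREFIX_LEN], key[PREFIX_LEN:]
	mp, ok := cs[prefix]
	if !ok {
		return nil, false
	}
	return mp.Get(key)
}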
Example #15
// NewEtcdMutationCache returns a MutationCache that knows how to deal with etcd-backed objects
func NewEtcdMutationCache(backingCache cache.Store) MutationCache {
	lru, err := lru.New(100)
	if err != nil {
		// errors only happen on invalid sizes, this would be programmer error
		panic(err)
	}

	return &mutationCache{
		backingCache:  backingCache,
		mutationCache: lru,
		comparator:    etcd.APIObjectVersioner{},
	}
}
Example #16
func main() {
	fmt.Println("hello")
	m, _ := glru.New(1000)
	m.Add("test", Test{Name: "figo", Tp: "android", Count: 1024})
	for _, k := range m.Keys() { // two-value range: a one-value range would yield slice indices, not keys
		log.Println("@k:", k)
		d, _ := m.Get("test")
		v := d.(Test)
		log.Println("@name:", v.Name, "@count:", v.Count, "@tp:", v.Tp)
	}
	d, _ := m.Get("test")
	v := d.(Test)
	log.Println("@name:", v.Name, "@count:", v.Count, "@tp:", v.Tp)
}
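The Test type is not shown; from its usage it is presumably something like:

type Test struct {
	Name  string
	Tp    string
	Count int
}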
Example #17
// newImageResolutionCache creates a new resolver that caches frequently loaded images for one minute.
func newImageResolutionCache(images client.ImageInterface, tags client.ImageStreamTagsNamespacer, isImages client.ImageStreamImagesNamespacer, integratedRegistry rules.RegistryMatcher) (*imageResolutionCache, error) {
	imageCache, err := lru.New(128)
	if err != nil {
		return nil, err
	}
	return &imageResolutionCache{
		images:     images,
		tags:       tags,
		isImages:   isImages,
		integrated: integratedRegistry,
		cache:      imageCache,
		expiration: time.Minute,
	}, nil
}
func New(fn Hash) *Map {
	lru_cache, err := lru.New(50000)
	if err != nil {
		log.Fatal("Lru cache failed")
	}
	m := &Map{
		hash:      fn,
		hashMap:   make(map[int]string),
		lru_cache: lru_cache,
	}
	if m.hash == nil {
		m.hash = crc32.ChecksumIEEE
	}
	return m
}
Example #19
// newQuotaAccessor creates an object that implements the QuotaAccessor interface, used to retrieve quota objects.
func newQuotaAccessor(clusterQuotaLister *ocache.IndexerToClusterResourceQuotaLister, namespaceLister *cache.IndexerToNamespaceLister, clusterQuotaClient oclient.ClusterResourceQuotasInterface, clusterQuotaMapper clusterquotamapping.ClusterQuotaMapper) *clusterQuotaAccessor {
	updatedCache, err := lru.New(100)
	if err != nil {
		// this should never happen
		panic(err)
	}

	return &clusterQuotaAccessor{
		clusterQuotaLister:   clusterQuotaLister,
		namespaceLister:      namespaceLister,
		clusterQuotaClient:   clusterQuotaClient,
		clusterQuotaMapper:   clusterQuotaMapper,
		updatedClusterQuotas: updatedCache,
	}
}
Example #20
// TestAdmitPodInNamespaceWithoutQuota ensures that a pod can be admitted when its namespace has no quota
func TestAdmitPodInNamespaceWithoutQuota(t *testing.T) {
	resourceQuota := &api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "other", ResourceVersion: "124"},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourceCPU:          resource.MustParse("3"),
				api.ResourceMemory:       resource.MustParse("100Gi"),
				api.ResourceLimitsMemory: resource.MustParse("200Gi"),
				api.ResourcePods:         resource.MustParse("5"),
			},
			Used: api.ResourceList{
				api.ResourceCPU:          resource.MustParse("1"),
				api.ResourceMemory:       resource.MustParse("50Gi"),
				api.ResourceLimitsMemory: resource.MustParse("100Gi"),
				api.ResourcePods:         resource.MustParse("3"),
			},
		},
	}
	kubeClient := fake.NewSimpleClientset(resourceQuota)
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	liveLookupCache, err := lru.New(100)
	if err != nil {
		t.Fatal(err)
	}
	stopCh := make(chan struct{})
	defer close(stopCh)

	quotaAccessor, _ := newQuotaAccessor(kubeClient)
	quotaAccessor.indexer = indexer
	quotaAccessor.liveLookupCache = liveLookupCache
	go quotaAccessor.Run(stopCh)
	evaluator := NewQuotaEvaluator(quotaAccessor, install.NewRegistry(kubeClient), nil, 5, stopCh)

	defer utilruntime.HandleCrash()
	handler := &quotaAdmission{
		Handler:   admission.NewHandler(admission.Create, admission.Update),
		evaluator: evaluator,
	}
	// Add to the index
	indexer.Add(resourceQuota)
	newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("200m", "")))
	// Add to the lru cache so we do not do a live client lookup
	liveLookupCache.Add(newPod.Namespace, liveLookupEntry{expiry: time.Now().Add(30 * time.Second), items: []*api.ResourceQuota{}})
	err = handler.Admit(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, nil))
	if err != nil {
		t.Errorf("Did not expect an error because the pod is in a different namespace than the quota")
	}
}
Example #21
// Create a new state from a given trie
func New(root common.Hash, db ethdb.Database) (*StateDB, error) {
	tr, err := trie.NewSecure(root, db)
	if err != nil {
		return nil, err
	}
	csc, _ := lru.New(codeSizeCacheSize)
	return &StateDB{
		db:                db,
		trie:              tr,
		codeSizeCache:     csc,
		stateObjects:      make(map[common.Address]*StateObject),
		stateObjectsDirty: make(map[common.Address]struct{}),
		refund:            new(big.Int),
		logs:              make(map[common.Hash]vm.Logs),
	}, nil
}
Example #22
// NewLimitRanger returns an object that enforces limits based on the supplied limit function
func NewLimitRanger(actions LimitRangerActions) (admission.Interface, error) {
	liveLookupCache, err := lru.New(10000)
	if err != nil {
		return nil, err
	}

	if actions == nil {
		actions = &DefaultLimitRangerActions{}
	}

	return &limitRanger{
		Handler:         admission.NewHandler(admission.Create, admission.Update),
		actions:         actions,
		liveLookupCache: liveLookupCache,
		liveTTL:         30 * time.Second,
	}, nil
}
Example #23
// NewBlockCacheStandard constructs a new BlockCacheStandard instance
// with the given transient capacity (in number of entries) and the
// clean-bytes capacity, which is the total number of bytes allowed
// between the transient and permanent clean caches.  If putting a
// block would exceed this byte capacity, transient entries are
// evicted until the block fits.
func NewBlockCacheStandard(config Config, transientCapacity int,
	cleanBytesCapacity uint64) *BlockCacheStandard {
	b := &BlockCacheStandard{
		config:             config,
		cleanBytesCapacity: cleanBytesCapacity,
		cleanPermanent:     make(map[BlockID]Block),
		dirty:              make(map[dirtyBlockID]Block),
	}

	if transientCapacity > 0 {
		var err error
		// TODO: Plumb error up.
		b.ids, err = lru.New(transientCapacity)
		if err != nil {
			return nil
		}

		b.cleanTransient, err = lru.NewWithEvict(transientCapacity, b.onEvict)
		if err != nil {
			return nil
		}
	}
	return b
}
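The onEvict method wired into lru.NewWithEvict above is not shown. A hedged sketch of what it might do, assuming a hypothetical subtractCleanBytes helper that keeps the clean-bytes total in sync with evictions:

func (b *BlockCacheStandard) onEvict(key interface{}, value interface{}) {
	block, ok := value.(Block)
	if !ok {
		return
	}
	// hypothetical bookkeeping: release the evicted block's contribution
	// to the clean-bytes budget enforced by cleanBytesCapacity
	b.subtractCleanBytes(block)
}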
func NewIndex(source string, cache_size int, cache_trigger int, logger *log.WOFLogger) (*WOFIndex, error) {

	rtree := rtreego.NewTree(2, 25, 50)

	cache, err := lru.New(cache_size)

	if err != nil {
		return nil, err
	}

	placetypes := make(map[string]int)

	idx := WOFIndex{
		RTree:        rtree,
		Source:       source,
		Cache:        cache,
		CacheSize:    cache_size,
		CacheTrigger: cache_trigger,
		Placetypes:   placetypes,
		Logger:       logger,
	}

	return &idx, nil
}
Example #25
// Main
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	verbose := flag.Bool("v", false, "should every proxy request be logged to stdout")
	/*login := flag.String("login", "", "proxy login")
	password := flag.String("password", "", "proxy passwd")*/
	addr := flag.String("addr", ":8080", "proxy listen address")
	flag.Parse()

	// Yara
	c, err := yara.NewCompiler()
	handleErr(err)
	// Load & compile rules
	err = filepath.Walk("rules/yara", func(path string, info os.FileInfo, err error) error {
		if info.IsDir() {
			return nil
		}
		return c.AddFile("", "rules/yara/"+info.Name())
	})
	handleErr(err)

	engine, err := c.Rules()
	handleErr(err)
	c.Destroy()

	// HTML cleaners
	htmlCleaner := NewHTMLCleaner()
	err = htmlCleaner.LoadRulesFromFile("rules/HTMLCleaner.txt")
	handleErr(err)

	// CSS injector
	cssInjector := NewCSSInjector()
	err = cssInjector.LoadRulesFromFile("rules/filters/css2inject.txt")
	handleErr(err)

	// launch proxy
	goproxy.CertOrganisation = "Pure proxy"
	// Cache for certs
	TLSConfigCache, err = lru.New(TLSCacheSize)
	handleErr(err)
	//goproxy.GoproxyCa, err = tls.X509KeyPair(CaCert, CaKey)
	ca, err := tls.X509KeyPair(CaCert, CaKey)
	handleErr(err)
	proxy := goproxy.NewProxyHttpServer()
	proxy.Verbose = *verbose

	MitmConnect := &goproxy.ConnectAction{
		Action: goproxy.ConnectMitm,
		TLSConfig: func(host string, ctx *goproxy.ProxyCtx) (*tls.Config, error) {
			return TLSGetConfig(host, ctx, &ca)
		},
	}
	var AlwaysMitm goproxy.FuncHttpsHandler = func(host string, ctx *goproxy.ProxyCtx) (
		*goproxy.ConnectAction, string) {
		return MitmConnect, host
	}

	proxy.OnRequest().HandleConnect(AlwaysMitm)

	/*auth.ProxyBasic(proxy, "my_realm", func(user, passwd string) bool {
		return user == *login && passwd == *password
	})*/

	/*
		proxy.OnRequest().HandleConnectFunc(func(host string, ctx *goproxy.ProxyCtx) (*goproxy.ConnectAction, string) {
			log.Println(host)
			name := ""
			err = engine.ScanMemory([]byte(host), func(rule *yara.Rule) yara.CallbackStatus {
				name = rule.Identifier
				return yara.Abort
			})
			if name != "" {
				log.Println("REJECTED", name, host)
				return goproxy.RejectConnect, host
			}
			return goproxy.OkConnect, host
		})
	*/

	// POC websocket
	/*proxy.OnRequest().HandleConnectFunc(func(host string, ctx *goproxy.ProxyCtx) (*goproxy.ConnectAction, string) {
		log.Println(host)
		if host == "live.toorop.fr:80" {
			msg := "---------------------------------------------------------\n"
			for k, v := range ctx.Req.Header {
				msg += string(k) + ":" + v[0] + "\n"
			}
			msg += "---------------------------------------------------------\n\n"
			log.Println(msg)
		}
		return goproxy.OkConnect, host
	})*/

	/*proxy.OnRequest(IsWebsocket).
		HijackConnect(func(req *http.Request, client net.Conn, ctx *goproxy.ProxyCtx) {
		defer func() {
			if e := recover(); e != nil {
				ctx.Logf("error connecting to remote: %v", e)
				client.Write([]byte("HTTP/1.1 500 Cannot reach destination\r\n\r\n"))
			}
			client.Close()
		}()
		log.Println("Requete versTHE  websocket")
		clientBuf := bufio.NewReadWriter(bufio.NewReader(client), bufio.NewWriter(client))
		remote, err := connectDial(proxy, "tcp", req.URL.Host)
		orPanic(err)
		remoteBuf := bufio.NewReadWriter(bufio.NewReader(remote), bufio.NewWriter(remote))
		for {
			req, err := http.ReadRequest(clientBuf.Reader)
			orPanic(err)
			orPanic(req.Write(remoteBuf))
			orPanic(remoteBuf.Flush())
			resp, err := http.ReadResponse(remoteBuf.Reader, req)
			orPanic(err)
			orPanic(resp.Write(clientBuf.Writer))
			orPanic(clientBuf.Flush())
		}
	})*/

	/*proxy.OnRequest(IsWebsocket).DoFunc(func(r *http.Request, ctx *goproxy.ProxyCtx) (*http.Request, *http.Response) {
		log.Println("Requete vers websocket")
		return r, nil
	})*/

	proxy.OnRequest().DoFunc(
		func(r *http.Request, ctx *goproxy.ProxyCtx) (*http.Request, *http.Response) {
			name := ""
			err = engine.ScanMemory([]byte(r.Host), func(rule *yara.Rule) yara.CallbackStatus {
				name = rule.Identifier
				return yara.Abort
			})
			if name == "" {
				err = engine.ScanMemory([]byte(r.RequestURI), func(rule *yara.Rule) yara.CallbackStatus {
					name = rule.Identifier
					return yara.Abort
				})
			}
			if name != "" {
				log.Println("BLOCKED", name, r.RequestURI)
				return r, goproxy.NewResponse(r,
					goproxy.ContentTypeText, http.StatusForbidden,
					"I'm sorry, Dave. I'm afraid I can't do that.")
			}
			return r, nil
		})

	// Scan response - POC
	// TODO: refactoring
	proxy.OnResponse().DoFunc(func(resp *http.Response, ctx *goproxy.ProxyCtx) *http.Response {
		if resp == nil {
			return nil
		}
		contentType := resp.Header.Get("content-type")

		// http
		if strings.HasPrefix(contentType, "text/html") {
			resp.Body = htmlCleaner.Clean(resp.Body, ctx.Req.Host)
		} else if strings.HasPrefix(contentType, "text/css") {
			resp.Body = cssInjector.Inject(resp.Body, ctx.Req.Host)
		} else if strings.HasPrefix(contentType, "application/json") {
			// POC remove google ads on search
			if ctx.Req.Host == "www.google.fr" {
				// read body
				body, err := ioutil.ReadAll(resp.Body)
				if err != nil {
					ctx.Warnf("Pure - ERROR while reading body: %s", err)
					return resp
				}
				bodyPart := strings.Split(string(body), `/*""*/`)
				t := ""
				for _, p := range bodyPart {
					if strings.Contains(p, "commercial-unit") || strings.Contains(p, "tadsb") {
						continue
					}
					t = t + p + `/*""*/`
				}
				body = []byte(t)
				resp.Body = ioutil.NopCloser(bytes.NewBuffer(body))
			}
		}

		return resp
	})
	log.Fatal(http.ListenAndServe(*addr, proxy))
}
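TLSGetConfig is referenced above but not shown. A hedged sketch of a cache-backed implementation, assuming goproxy's TLSConfigFromCA helper performs the actual per-host certificate generation:

func TLSGetConfig(host string, ctx *goproxy.ProxyCtx, ca *tls.Certificate) (*tls.Config, error) {
	if v, ok := TLSConfigCache.Get(host); ok {
		return v.(*tls.Config), nil // cache hit: reuse the per-host config
	}
	cfg, err := goproxy.TLSConfigFromCA(ca)(host, ctx)
	if err != nil {
		return nil, err
	}
	TLSConfigCache.Add(host, cfg)
	return cfg, nil
}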
Example #26
func newLruStore() lrustore {
	c := make(map[string]*lru.Cache)
	if cfg != nil && cfg.Destinations != nil {
		c[utils.DESTINATION_PREFIX], _ = lru.New(cfg.Destinations.Limit)
	} else {
		c[utils.DESTINATION_PREFIX], _ = lru.New(10000)
	}
	if cfg != nil && cfg.ReverseDestinations != nil {
		c[utils.REVERSE_DESTINATION_PREFIX], _ = lru.New(cfg.ReverseDestinations.Limit)
	} else {
		c[utils.REVERSE_DESTINATION_PREFIX], _ = lru.New(10000)
	}
	if cfg != nil && cfg.RatingPlans != nil {
		c[utils.RATING_PLAN_PREFIX], _ = lru.New(cfg.RatingPlans.Limit)
	} else {
		c[utils.RATING_PLAN_PREFIX], _ = lru.New(10000)
	}
	if cfg != nil && cfg.RatingProfiles != nil {
		c[utils.RATING_PROFILE_PREFIX], _ = lru.New(cfg.RatingProfiles.Limit)
	} else {
		c[utils.RATING_PROFILE_PREFIX], _ = lru.New(10000)
	}
	if cfg != nil && cfg.Lcr != nil {
		c[utils.LCR_PREFIX], _ = lru.New(cfg.Lcr.Limit)
	} else {
		c[utils.LCR_PREFIX], _ = lru.New(10000)
	}
	if cfg != nil && cfg.CdrStats != nil {
		c[utils.CDR_STATS_PREFIX], _ = lru.New(cfg.CdrStats.Limit)
	} else {
		c[utils.CDR_STATS_PREFIX], _ = lru.New(10000)
	}
	if cfg != nil && cfg.Actions != nil {
		c[utils.ACTION_PREFIX], _ = lru.New(cfg.Actions.Limit)
	} else {
		c[utils.ACTION_PREFIX], _ = lru.New(10000)
	}
	if cfg != nil && cfg.ActionPlans != nil {
		c[utils.ACTION_PLAN_PREFIX], _ = lru.New(cfg.ActionPlans.Limit)
	} else {
		c[utils.ACTION_PLAN_PREFIX], _ = lru.New(10000)
	}
	if cfg != nil && cfg.ActionTriggers != nil {
		c[utils.ACTION_TRIGGER_PREFIX], _ = lru.New(cfg.ActionTriggers.Limit)
	} else {
		c[utils.ACTION_TRIGGER_PREFIX], _ = lru.New(10000)
	}
	if cfg != nil && cfg.SharedGroups != nil {
		c[utils.SHARED_GROUP_PREFIX], _ = lru.New(cfg.SharedGroups.Limit)
	} else {
		c[utils.SHARED_GROUP_PREFIX], _ = lru.New(10000)
	}
	if cfg != nil && cfg.Aliases != nil {
		c[utils.ALIASES_PREFIX], _ = lru.New(cfg.Aliases.Limit)
	} else {
		c[utils.ALIASES_PREFIX], _ = lru.New(10000)
	}
	if cfg != nil && cfg.ReverseAliases != nil {
		c[utils.REVERSE_ALIASES_PREFIX], _ = lru.New(cfg.ReverseAliases.Limit)
	} else {
		c[utils.REVERSE_ALIASES_PREFIX], _ = lru.New(10000)
	}

	return c
}
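The twelve near-identical branches above collapse into a table-driven loop. A sketch, where CacheParamConfig is a made-up name standing in for whatever type the cfg sections share (only their Limit field is assumed):

func newLruStoreCompact() lrustore {
	c := make(map[string]*lru.Cache)
	prefixes := []string{
		utils.DESTINATION_PREFIX, utils.REVERSE_DESTINATION_PREFIX,
		utils.RATING_PLAN_PREFIX, utils.RATING_PROFILE_PREFIX,
		utils.LCR_PREFIX, utils.CDR_STATS_PREFIX,
		utils.ACTION_PREFIX, utils.ACTION_PLAN_PREFIX,
		utils.ACTION_TRIGGER_PREFIX, utils.SHARED_GROUP_PREFIX,
		utils.ALIASES_PREFIX, utils.REVERSE_ALIASES_PREFIX,
	}
	sections := map[string]*CacheParamConfig{}
	if cfg != nil {
		sections = map[string]*CacheParamConfig{
			utils.DESTINATION_PREFIX:         cfg.Destinations,
			utils.REVERSE_DESTINATION_PREFIX: cfg.ReverseDestinations,
			utils.RATING_PLAN_PREFIX:         cfg.RatingPlans,
			utils.RATING_PROFILE_PREFIX:      cfg.RatingProfiles,
			utils.LCR_PREFIX:                 cfg.Lcr,
			utils.CDR_STATS_PREFIX:           cfg.CdrStats,
			utils.ACTION_PREFIX:              cfg.Actions,
			utils.ACTION_PLAN_PREFIX:         cfg.ActionPlans,
			utils.ACTION_TRIGGER_PREFIX:      cfg.ActionTriggers,
			utils.SHARED_GROUP_PREFIX:        cfg.SharedGroups,
			utils.ALIASES_PREFIX:             cfg.Aliases,
			utils.REVERSE_ALIASES_PREFIX:     cfg.ReverseAliases,
		}
	}
	for _, prefix := range prefixes {
		size := 10000 // same default capacity as the original
		if section := sections[prefix]; section != nil {
			size = section.Limit
		}
		c[prefix], _ = lru.New(size)
	}
	return c
}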
Example #27
	ParentId    string
	RepoTags    []string
	Size        int64
	VirtualSize int64

	Architecture string
	Author       string
	Comment      string
	//Config          *ContainerConfig
	Container string
	//ContainerConfig *ContainerConfig
	DockerVersion string
	Os            string
}

var imageCache, _ = lru.New(1024)

func ListImagesDetailed(dockerClient *dockerclient.DockerClient, all bool) ([]*DetailedImageInfo, error) {
	images, err := dockerClient.ListImages(all)
	if err != nil {
		return nil, err
	}
	var result = make([]*DetailedImageInfo, len(images))
	for i, image := range images {
		imagesDetails, _ := InspectImage(dockerClient, image.Id)
		detailedImageInfo := DetailedImageInfo{
			Created:       image.Created,
			Id:            image.Id,
			ParentId:      image.ParentId,
			RepoTags:      image.RepoTags,
			Size:          image.Size,