func (l *splunkLogger) tryPostMessages(messages []*splunkMessage) error {
	if len(messages) == 0 {
		return nil
	}
	var buffer bytes.Buffer
	var writer io.Writer
	var gzipWriter *gzip.Writer
	var err error
	// If gzip compression is enabled, create a gzip writer with the specified
	// compression level. Otherwise use the plain buffer as the writer.
	if l.gzipCompression {
		gzipWriter, err = gzip.NewWriterLevel(&buffer, l.gzipCompressionLevel)
		if err != nil {
			return err
		}
		writer = gzipWriter
	} else {
		writer = &buffer
	}
	for _, message := range messages {
		jsonEvent, err := json.Marshal(message)
		if err != nil {
			return err
		}
		if _, err := writer.Write(jsonEvent); err != nil {
			return err
		}
	}
	// If gzip compression is enabled, close the writer to flush the stream.
	if l.gzipCompression {
		err = gzipWriter.Close()
		if err != nil {
			return err
		}
	}
	req, err := http.NewRequest("POST", l.url, bytes.NewBuffer(buffer.Bytes()))
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", l.auth)
	// Advertise that the request body is gzip compressed.
	if l.gzipCompression {
		req.Header.Set("Content-Encoding", "gzip")
	}
	res, err := l.client.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		var body []byte
		body, err = ioutil.ReadAll(res.Body)
		if err != nil {
			return err
		}
		return fmt.Errorf("%s: failed to send event - %s - %s", driverName, res.Status, body)
	}
	io.Copy(ioutil.Discard, res.Body)
	return nil
}
// writeBytes writes an encrypted, compressed stream to an io.Writer.
func writeBytes(out io.Writer, key string, data []byte) error {
	var gzWriter *gzip.Writer  // compressed writer
	var iv [aes.BlockSize]byte // initialization vector
	var cb cipher.Block        // cipher block interface
	var err error              // general error holder

	// init cipher block
	if cb, err = aes.NewCipher(hashKey(key)); err != nil {
		return err
	}

	// init encrypted writer
	encWriter := &cipher.StreamWriter{
		S: cipher.NewOFB(cb, iv[:]),
		W: out,
	}
	// close when done
	defer encWriter.Close()

	// wrap encrypted writer
	gzWriter = gzip.NewWriter(encWriter)
	// close when done
	defer gzWriter.Close()

	// copy data to the destination, compressing and encrypting along the way
	_, err = io.Copy(gzWriter, bytes.NewReader(data))

	// return copy error
	return err
}
func NewDscout(waiter *sync.WaitGroup, filename string) *Dscout {
	d := new(Dscout)
	d.Operation.Waiter = waiter
	d.Operation.Waiter.Add(1)
	file, err := os.Create(filename)
	if err != nil {
		return nil
	}
	d.closer = func() { file.Close() }
	var writer io.WriteCloser = file
	var compressor *gzip.Writer
	if strings.HasSuffix(filename, ".gz") {
		compressor = gzip.NewWriter(file)
		d.closer = func() { compressor.Close(); file.Close() }
		writer = compressor
	}
	// TrimSuffix (not TrimRight, which strips a character set) removes the
	// ".gz" extension so the underlying format can be detected.
	uncompressedName := strings.TrimSuffix(filename, ".gz")
	switch {
	case strings.HasSuffix(uncompressedName, ".gob"):
		d.marshaler = new(formats.GobMarshaler)
	case strings.HasSuffix(uncompressedName, ".xml"):
		d.marshaler = new(formats.XmlMarshaler)
	}
	if d.marshaler != nil {
		d.marshaler.InitFile(writer)
	}
	return d
}
func CreateGobsFile(targetFilePath string, recs *[]interface{}, getRecPtr GobRecPtrMaker, gzipped bool) {
	var file, err = os.Create(targetFilePath)
	var gobber *gob.Encoder
	var gzipper *gzip.Writer
	if file != nil {
		defer file.Close()
	}
	if err != nil {
		panic(err)
	}
	if gzipped {
		if gzipper, err = gzip.NewWriterLevel(file, gzip.BestCompression); gzipper != nil {
			defer gzipper.Close()
			gobber = gob.NewEncoder(gzipper)
		}
		if err != nil {
			panic(err)
		}
	} else {
		gobber = gob.NewEncoder(file)
	}
	for _, rec := range *recs {
		if err = gobber.Encode(coreutil.PtrVal(getRecPtr(rec))); err != nil {
			panic(err)
		}
	}
}
func newGzipResponseWriter(w http.ResponseWriter) *gzipResponseWriter {
	var gz *gzip.Writer
	if gzI := gzipWriterPool.Get(); gzI == nil {
		gz = gzip.NewWriter(w)
	} else {
		gz = gzI.(*gzip.Writer)
		gz.Reset(w)
	}
	return &gzipResponseWriter{WriteCloser: gz, ResponseWriter: w}
}
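The pool and wrapper type used by newGzipResponseWriter are not part of this snippet. A minimal sketch of what they might look like, assuming the wrapper only needs to route body writes through the pooled compressor; the names come from the call sites above, everything else is an assumption:

// Sketch only: assumed definitions behind newGzipResponseWriter.
var gzipWriterPool sync.Pool // holds idle *gzip.Writer values for reuse

type gzipResponseWriter struct {
	io.WriteCloser      // the pooled *gzip.Writer
	http.ResponseWriter // the underlying response
}

// Write sends body bytes through the gzip writer rather than the raw
// connection; an explicit method is needed because both embedded fields
// have a Write method.
func (w *gzipResponseWriter) Write(b []byte) (int, error) {
	return w.WriteCloser.Write(b)
}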
func (this *HtmlView) Publish(ctxt *web.Context) (err error) {
	names := mvc.GetMvcMeta(ctxt)
	if names[mvc.MVC_ACTION] == "" {
		names[mvc.MVC_ACTION] = "_"
	}
	var tmpl *template.Template
	tmpl, err = this.getTmpl(names)
	if err == nil {
		var method = ctxt.Method()
		ctxt.SetHeader("Content-Type", "text/html; charset=utf-8")
		if method != "HEAD" {
			var err error
			var tw io.Writer = ctxt.Response
			var gzipwriter *gzip.Writer
			if ctxt.ReqHeaderHas("Accept-Encoding", "gzip") {
				ctxt.SetHeader("Content-Encoding", "gzip")
				gzipwriter, _ = gzip.NewWriterLevel(ctxt.Response, gzip.BestSpeed)
				tw = gzipwriter
			}
			ctxt.SetHeader("Vary", "Accept-Encoding")
			ctxt.Response.WriteHeader(200)
			err = tmpl.Execute(tw, this.VM)
			if err != nil {
				// Header already sent... multiple write headers
				//panic(err)
				log.Println(err)
			}
			if gzipwriter != nil {
				gzipwriter.Close()
			}
			if flushw, ok := ctxt.Response.(http.Flusher); ok {
				flushw.Flush()
			}
		} else {
			ctxt.Response.WriteHeader(200)
		}
	} else {
		log.Println(err)
		ctxt.SetErrorCode(500)
	}
	return
}
func webQuitHandler(diskwriter *csv.Writer, gzipwriter *gzip.Writer, csvfile *os.File) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "flushing to disk and shutting down")
		diskwriter.Flush()
		if gzipwriter != nil {
			gzipwriter.Flush()
			gzipwriter.Close()
		}
		csvfile.Close()
		os.Exit(0)
	}
}
func main() {
	var file *os.File
	var err error
	var writer *gzip.Writer
	var body []byte
	if file, err = os.Create("output/sample.tar.gz"); err != nil {
		log.Fatalln(err)
	}
	defer file.Close()
	// gzip.NewWriter() is convenient here since it never returns an error.
	if writer, err = gzip.NewWriterLevel(file, gzip.BestCompression); err != nil {
		log.Fatalln(err)
	}
	defer writer.Close()
	var filepaths = []string{
		"files/b0044482_1413812.jpg",
		"files/dart_flight_school.png",
		"files/golang.txt",
	}
	// Write() satisfies io.Writer, so the gzip writer can be used directly.
	// buf := new(bytes.Buffer)
	// tw := tar.NewWriter(buf)
	tw := tar.NewWriter(writer)
	defer tw.Close()
	for _, filepath := range filepaths {
		if body, err = ioutil.ReadFile(filepath); err != nil {
			log.Fatalln(err)
		}
		if body != nil {
			hdr := &tar.Header{
				Name: path.Base(filepath),
				Size: int64(len(body)),
			}
			if err := tw.WriteHeader(hdr); err != nil {
				println(err)
			}
			if _, err := tw.Write(body); err != nil {
				println(err)
			}
		}
	}
	// writer.Write(buf.Bytes())
	// writer.Flush()
}
func (s *server) serveSync(rw http.ResponseWriter, req *http.Request) {
	memberID := s.validateConnection(rw, req)
	if memberID == "" {
		return
	}
	s.logger.Infof("Peer Member %s started synchronization from address %s", memberID, req.RemoteAddr)

	// Create a new channel for the new member synchronization
	syncChan := make(chan []byte)
	s.syncReqChannel <- syncChan

	// Send the response header to client side
	flusher := rw.(http.Flusher)
	gzipped := strings.Contains(req.Header.Get("Accept-Encoding"), "gzip")
	var gzipWriter *gzip.Writer
	var err error
	if gzipped {
		// override the response writer to be a gzipped writer
		if gzipWriter, err = gzip.NewWriterLevel(rw, gzip.DefaultCompression); err == nil {
			rw = gzipResponseWrapper{Writer: gzipWriter, ResponseWriter: rw}
			rw.Header().Set("Content-Encoding", "gzip")
			flusher = rw.(http.Flusher)
			defer func() {
				gzipWriter.Close()
			}()
		} else {
			s.logger.Warnf("Gzip wrapper creation for %s from %s failed: %v. Falling back to uncompressed HTTP", memberID, req.RemoteAddr, err)
		}
	}
	encoder := newEncoder(rw)
	var msgID uint64
	for data := range syncChan {
		ev := &sse{id: strconv.FormatUint(msgID, 10), event: "SYNC", data: string(data)}
		// Write to the ResponseWriter, Server Sent Events compatible
		if err := encoder.Encode(ev); err != nil {
			s.logger.WithFields(log.Fields{
				"error": err,
			}).Errorf("Failed to encode sync message to peer member %s", memberID)
			return
		}
		msgID++
	}
	flusher.Flush()
	s.logger.Infof("Peer Member %s synchronization has been completed successfully", memberID)
}
func createInvoiceFile(filename string) (io.WriteCloser, func(), error) {
	file, err := os.Create(filename)
	if err != nil {
		return nil, nil, err
	}
	closer := func() { file.Close() }
	var writer io.WriteCloser = file
	var compressor *gzip.Writer
	if strings.HasSuffix(filename, ".gz") {
		compressor = gzip.NewWriter(file)
		closer = func() { compressor.Close(); file.Close() }
		writer = compressor
	}
	return writer, closer, nil
}
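A short usage sketch for createInvoiceFile; the returned closer must run before the program exits so that the gzip trailer (when a .gz name is used) gets written. The caller and filename are hypothetical:

// Hypothetical caller: writes one line and tears the writers down in order.
func writeSampleInvoice() error {
	writer, closer, err := createInvoiceFile("invoices.csv.gz")
	if err != nil {
		return err
	}
	defer closer() // closes the gzip stream first, then the file
	_, err = io.WriteString(writer, "id,amount\n")
	return err
}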
func getPubmedRecords(urlFetcher *gopubmed.Fetcher, first bool, meshWriter *gzip.Writer, xmlWriter *gzip.Writer, transport *http.Transport, pmids []string) {
	preUrlTime := time.Now()
	articles, raw, err := urlFetcher.GetArticlesAndRaw(pmids)
	if err != nil {
		log.Fatal(err)
	}
	s := string(raw)
	for i := 0; i < len(articles); i++ {
		pubmedArticle := articles[i]
		if pubmedArticle.MedlineCitation != nil && pubmedArticle.MedlineCitation.MeshHeadingList != nil && pubmedArticle.MedlineCitation.MeshHeadingList.MeshHeading != nil {
			fmt.Fprint(meshWriter, pubmedArticle.MedlineCitation.PMID.Text)
			headings := pubmedArticle.MedlineCitation.MeshHeadingList.MeshHeading
			for j := 0; j < len(headings); j++ {
				fmt.Fprint(meshWriter, "|")
				fmt.Fprint(meshWriter, headings[j].DescriptorName.Attr_UI)
				fmt.Fprint(meshWriter, "::"+headings[j].DescriptorName.Text)
				if len(headings[j].QualifierName) > 0 {
					fmt.Fprint(meshWriter, "=")
					for q := 0; q < len(headings[j].QualifierName); q++ {
						if q != 0 {
							fmt.Fprint(meshWriter, "&")
						}
						fmt.Fprint(meshWriter, headings[j].QualifierName[q].Attr_UI)
						fmt.Fprint(meshWriter, "::"+headings[j].QualifierName[q].Text)
					}
				}
			}
			fmt.Fprintln(meshWriter, "")
		}
	}
	meshWriter.Flush()
	if !first {
		s = strings.Replace(s, startXml, "", -1)
		s = strings.Replace(s, docType, "", -1)
		s = strings.Replace(s, startPubmedArticleSet, "", -1)
	}
	s = strings.Replace(s, endPubmedArticleSet, "<!-- breakset -->", -1)
	xmlWriter.Write([]byte(s))
	postUrlTime := time.Now()
	log.Println("Total request time:", postUrlTime.Sub(preUrlTime))
}
func (e *Engine) gzip() error {
	var buf bytes.Buffer
	var level int
	var err error
	level, err = e.stack.PopInt()
	if err == nil {
		var w *gzip.Writer
		w, err = gzip.NewWriterLevel(&buf, level)
		if err == nil {
			_, err = w.Write(e.stack.Pop())
			w.Close()
		}
	}
	if err == nil {
		e.stack.Push(buf.Bytes())
	}
	return err
}
func writeSure(path string, info DirWalker) (err error) {
	file, err := os.Create(path)
	if err != nil {
		return
	}
	defer file.Close()
	var zfile io.Writer
	if strings.HasSuffix(path, ".gz") {
		tmp := gzip.NewWriter(file)
		defer tmp.Close()
		zfile = tmp
	} else {
		zfile = file
	}
	io.WriteString(zfile, magic)
	dumpDir(zfile, "__root__", info)
	return
}
func (nf *Netflow) NewFileWriter(filename string, mode int, compress bool) error {
	log.Debugln("NewFileWriter")
	if _, ok := nf.writers[filename]; ok {
		return fmt.Errorf("netflow writer %v already exists", filename)
	}
	f, err := os.Create(filename)
	if err != nil {
		return err
	}
	c := make(chan *Packet, BUFFER_DEPTH)
	go func() {
		var w *gzip.Writer
		if compress {
			log.Debugln("using compression")
			w = gzip.NewWriter(f)
		}
		for {
			d := <-c
			if d == nil {
				break
			}
			if mode == ASCII {
				if compress {
					w.Write([]byte(d.GoString()))
				} else {
					f.Write([]byte(d.GoString()))
				}
			} else {
				if compress {
					w.Write(d.Raw)
				} else {
					f.Write(d.Raw)
				}
			}
		}
		if compress {
			w.Close()
		}
		f.Close()
	}()
	nf.registerWriter(filename, c)
	return nil
}
func calcFileInfo(fi *FileInfo) {
	fmt.Printf("calcFileInfo: '%s'\n", fi.Path)
	const BufSize = 16 * 1024
	var buf [BufSize]byte
	r, err := os.Open(fi.Path)
	fataliferr(err)
	defer r.Close()
	sha1 := sha1.New()
	md5Hash := md5.New()
	fi.ShouldCompress = false
	tryCompressFirstBlock := shouldTryCompressFile(fi.Path)
	var gzw *gzip.Writer
	compressedData := &bytes.Buffer{}
	fi.Size = 0
	fi.CompressedData = nil
	for {
		n, err := r.Read(buf[:])
		if err == io.EOF {
			break
		}
		d := buf[:n]
		fataliferr(err)
		fatalif(n == 0, "n is 0")
		fi.Size += n
		_, err = sha1.Write(d)
		fataliferr(err)
		_, err = md5Hash.Write(d)
		fataliferr(err)
		if tryCompressFirstBlock {
			tryCompressFirstBlock = false
			gz, err := gzip.NewWriterLevel(compressedData, gzip.BestCompression)
			fataliferr(err)
			_, err = gz.Write(d)
			fataliferr(err)
			gz.Close()
			compressedSize := compressedData.Len()
			saved := n - compressedSize
			// relatively high threshold of 20% savings on compression
			fi.ShouldCompress = saved > 0 && perc(compressedSize, saved) > 20
			diff := n - compressedSize
			fmt.Printf(" should compress: %v, %d => %d (%d %.2f%%)\n", fi.ShouldCompress, n, compressedSize, diff, perc(n, diff))
			if fi.ShouldCompress {
				compressedData = &bytes.Buffer{}
				gzw, err = gzip.NewWriterLevel(compressedData, gzip.BestCompression)
				fataliferr(err)
			}
		}
		if gzw != nil {
			_, err = gzw.Write(d)
			fataliferr(err)
		}
	}
	sha1Sum := sha1.Sum(nil)
	fi.Sha1Hex = fmt.Sprintf("%x", sha1Sum)
	if gzw != nil {
		gzw.Close()
		compressedSize := compressedData.Len()
		// only use compressed if compressed by at least 5%
		if compressedSize+(compressedSize/20) < fi.Size {
			fi.CompressedData = compressedData.Bytes()
		}
	}
	md5Sum := md5Hash.Sum(nil)
	fi.Md5Hex = fmt.Sprintf("%x", md5Sum)
	// if compressed, md5 is of the compressed content
	if fi.CompressedData != nil {
		md5Sum2 := md5.Sum(fi.CompressedData)
		fi.Md5Hex = fmt.Sprintf("%x", md5Sum2[:])
	}
	fi.S3PathSha1Part = sha1HexToS3Path(fi.Sha1Hex)
	ext := strings.ToLower(filepath.Ext(fi.Path))
	if fi.CompressedData != nil {
		fi.S3FullPath = fi.S3PathSha1Part + ".gz" + ext
	} else {
		fi.S3FullPath = fi.S3PathSha1Part + ext
	}
	fmt.Printf(" sha1: %s\n", fi.Sha1Hex)
	fmt.Printf(" md5: %s\n", fi.Md5Hex)
	fmt.Printf(" s3: %s\n", fi.S3FullPath)
	fmt.Printf(" size: %d\n", fi.Size)
	if fi.CompressedData != nil {
		sizedCompressed := len(fi.CompressedData)
		saved := fi.Size - sizedCompressed
		fmt.Printf(" size compressed: %d (saves %d %.2f%%)\n", sizedCompressed, saved, perc(fi.Size, saved))
	}
}
func (p Handler) processText(s *Session, w http.ResponseWriter, resp *http.Response) (err error) {
	var (
		zr      *gzip.Reader
		zw      *gzip.Writer
		body    []byte
		gzipped bool   = resp.Header.Get("Content-Encoding") == "gzip"
		reqHost string = resp.Request.URL.Host
		reqPath string = resp.Request.URL.Path
	)
	if resp.ContentLength != 0 && resp.Request.Method != "HEAD" {
		if gzipped {
			zr, err = gzip.NewReader(resp.Body)
			if err == nil {
				body, err = ioutil.ReadAll(zr)
				if !consumeError(&err) {
					return dumpError(err)
				}
			}
		} else {
			body, err = ioutil.ReadAll(resp.Body)
			if !consumeError(&err) {
				return dumpError(err)
			}
		}
	}
	w.Header().Del("Content-Length")
	w.Header().Set("Content-Encoding", "gzip")
	w.WriteHeader(resp.StatusCode)
	if len(body) == 0 {
		return
	}
	var (
		rules           []ReRule
		bodyExtraHeader string
	)
	switch p {
	case HD_html:
		rules = reRules.Html
	case HD_javascript:
		rules = reRules.Js
	case HD_json:
		rules = reRules.Json
	case HD_css:
		rules = reRules.Css
	}
	if log.V(5) {
		log.Infof("Original entity %s\n%s", reqPath, string(body))
	}
	if s.abusing {
		imgSrc := fmt.Sprintf(`<img src="/!%s/sorry`, reqHost)
		body = bytes.Replace(body, []byte(`<img src="/sorry`), []byte(imgSrc), 1)
		rules = nil
	}
	for i, r := range rules {
		if r.PathRe != nil && r.PathRe.FindString(reqPath) == NULL {
			if log.V(4) {
				log.Infof("re.%d=[%s] pathRe=deny", i, r.ContentPattern.Pattern)
			}
			continue
		}
		if log.V(4) {
			log.Infof("re.%d=[%s] applied", i, r.ContentPattern.Pattern)
		}
		if r.Scheme&0xff > 0 {
			body = r.ContentRe.Replace(body, r.Replacement)
		}
		if r.Scheme&0xff00 > 0 {
			bodyExtraHeader += r.InsertHeader
		}
	}
	zw = gzip.NewWriter(w)
	if len(bodyExtraHeader) > 0 {
		zw.Write([]byte(bodyExtraHeader))
	}
	zw.Write(body)
	err = zw.Flush()
	return
}
func (h *handler) sendContinuousChangesByWebSocket(inChannels base.Set, options db.ChangesOptions) (error, bool) {
	forceClose := false
	handler := func(conn *websocket.Conn) {
		h.logStatus(101, "Upgraded to WebSocket protocol")
		defer func() {
			conn.Close()
			base.LogTo("HTTP+", "#%03d: --> WebSocket closed", h.serialNumber)
		}()
		// Read changes-feed options from an initial incoming WebSocket message in JSON format:
		var compress bool
		if msg, err := readWebSocketMessage(conn); err != nil {
			return
		} else {
			var channelNames []string
			var err error
			if _, options, _, channelNames, _, compress, err = h.readChangesOptionsFromJSON(msg); err != nil {
				return
			}
			if channelNames != nil {
				inChannels, _ = channels.SetFromArray(channelNames, channels.ExpandStar)
			}
		}
		// Set up GZip compression
		var writer *bytes.Buffer
		var zipWriter *gzip.Writer
		if compress {
			writer = bytes.NewBuffer(nil)
			zipWriter = GetGZipWriter(writer)
		}
		caughtUp := false
		_, forceClose = h.generateContinuousChanges(inChannels, options, func(changes []*db.ChangeEntry) error {
			var data []byte
			if changes != nil {
				data, _ = json.Marshal(changes)
			} else if !caughtUp {
				caughtUp = true
				data, _ = json.Marshal([]*db.ChangeEntry{})
			} else {
				data = []byte{}
			}
			if compress && len(data) > 8 {
				// Compress JSON, using same GZip context, and send as binary msg:
				zipWriter.Write(data)
				zipWriter.Flush()
				data = writer.Bytes()
				writer.Reset()
				conn.PayloadType = websocket.BinaryFrame
			} else {
				conn.PayloadType = websocket.TextFrame
			}
			_, err := conn.Write(data)
			return err
		})
		if zipWriter != nil {
			ReturnGZipWriter(zipWriter)
		}
	}
	server := websocket.Server{
		Handshake: func(*websocket.Config, *http.Request) error { return nil },
		Handler:   handler,
	}
	server.ServeHTTP(h.response, h.rq)
	return nil, forceClose
}
func (p *plug) daemon() {
	var diskwriter *csv.Writer
	var gzipwriter *gzip.Writer
	fmt.Println("starting foreground daemon ;-)")
	// write csv from disk into the buffer
	fmt.Println("loading history (" + p.csvfile + ")")
	p.buffer.Write(readcsv(p.csvfile))
	// create/append the csvfile on disk
	csvfile, err := os.OpenFile(p.csvfile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		log.Fatal("Error:", err)
	}
	defer csvfile.Close()
	// create a bufferwriter (appends to csv already in p.buffer)
	bufferwriter := csv.NewWriter(&p.buffer)
	// compressed or not
	if strings.Contains(p.csvfile, ".gz") {
		gzipwriter, _ = gzip.NewWriterLevel(csvfile, gzip.BestCompression)
		defer gzipwriter.Close()
		// wrap csv around gzipwriter
		diskwriter = csv.NewWriter(gzipwriter)
	} else {
		// create a diskwriter (appends to csv on disk)
		diskwriter = csv.NewWriter(csvfile)
	}
	// connect via telnet to the device and login
	conn, err := p.DialTimeout("tcp", p.device, time.Duration(time.Second*30))
	if err != nil {
		log.Fatal("can't connect")
	}
	// create http handlers
	http.HandleFunc("/quit", webQuitHandler(diskwriter, gzipwriter, csvfile))
	http.HandleFunc("/history", webHistoryHandler)
	http.HandleFunc("/stream", webStreamHandler)
	http.HandleFunc("/read.csv", webReadCsvHandler(p))
	http.HandleFunc("/read.json", webReadJsonHandler(p))
	// needed for occasionally flushing on a newline
	recordcount := 0
	// start infinite polling loop
	for {
		// measure how long it takes
		start := time.Now()
		// specify correct format for dygraph
		record := []string{start.Format("2006/01/02 15:04:05")}
		// get statistics from device and cleanup
		status := sendln(conn, plugGetInfoStats, '#')
		status = strings.Replace(status, plugGetInfoStats+"\r\n", "", 1)
		status = strings.Replace(status, "#", "", 1)
		// split up the 4 results at newlines
		results := strings.SplitN(status, "\r\n", 4)
		re := regexp.MustCompile("01(I|V|W|E)[0-9]+ 0*([0-9]+)")
		// for each GetInfo result, do a regexp match, adjust value and create a CSV record
		for i, result := range results {
			match := re.FindStringSubmatch(result)
			value := "0"
			// check if we got the right size of slice
			if len(match) == 3 {
				value = match[2]
			}
			temp, _ := strconv.ParseFloat(value, 32)
			switch i {
			case 0:
				// mAmp/10 -> Amp
				value = strconv.FormatFloat(temp/10000, 'f', 2, 32)
			case 1:
				// centiWatt -> Watt
				value = strconv.FormatFloat(temp/100, 'f', 2, 32)
			case 2, 3:
				// mWatt/h -> Watt/h | mVolt -> Volt
				value = strconv.FormatFloat(temp/1000, 'f', 2, 32)
			}
			record = append(record, value)
			recordcount += 1
		}
		// latestentry is needed in JSON for the realtime streaming
		p.latestEntry, _ = json.Marshal(record)
		// write the record to disk
		err := diskwriter.Write(record)
		if err != nil {
			fmt.Println("Error:", err)
		}
		// write the record to buffer (in memory)
		err = bufferwriter.Write(record)
		if err != nil {
			fmt.Println("Error:", err)
		}
		// flush to disk every 100 records
		if recordcount%100 == 0 {
			diskwriter.Flush()
			if strings.Contains(p.csvfile, ".gz") {
				gzipwriter.Flush()
			}
		}
		// flush memory immediately
		bufferwriter.Flush()
		if debug {
			fmt.Print(record)
			fmt.Println(" took", time.Since(start))
		}
		// sleep the right amount of time
		time.Sleep(time.Second*time.Duration(p.delay) - time.Since(start))
	}
}
//go:generate goannotation $GOFILE
// @rest GET /v1/msgs/:appid/:topic/:ver?group=xx&batch=10&reset=<newest|oldest>&ack=1&q=<dead|retry>
func (this *subServer) subHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	var (
		topic      string
		ver        string
		myAppid    string
		hisAppid   string
		reset      string
		group      string
		realGroup  string
		shadow     string
		rawTopic   string
		partition  string
		partitionN int = -1
		offset     string
		offsetN    int64 = -1
		limit      int   // max messages to include in the message set
		delayedAck bool  // last acked partition/offset piggybacked on this request
		err        error
	)
	if !Options.DisableMetrics {
		this.subMetrics.SubTryQps.Mark(1)
	}
	query := r.URL.Query()
	group = query.Get("group")
	myAppid = r.Header.Get(HttpHeaderAppid)
	realGroup = myAppid + "." + group
	reset = query.Get("reset")
	realIp := getHttpRemoteIp(r)
	if !manager.Default.ValidateGroupName(r.Header, group) {
		log.Error("sub -(%s): illegal group: %s", realIp, group)
		this.subMetrics.ClientError.Mark(1)
		writeBadRequest(w, "illegal group")
		return
	}
	if Options.BadGroupRateLimit && !this.throttleBadGroup.Pour(realGroup, 0) {
		this.goodGroupLock.RLock()
		_, good := this.goodGroupClients[r.RemoteAddr]
		this.goodGroupLock.RUnlock()
		if !good {
			// this bad group client is in confinement period
			log.Error("sub -(%s): group[%s] failure quota exceeded %s", realIp, realGroup, r.Header.Get("User-Agent"))
			writeQuotaExceeded(w)
			return
		}
	}
	limit, err = getHttpQueryInt(&query, "batch", 1)
	if err != nil {
		log.Error("sub -(%s): illegal batch: %v", realIp, err)
		this.subMetrics.ClientError.Mark(1)
		writeBadRequest(w, "illegal batch")
		return
	}
	if limit > Options.MaxSubBatchSize && Options.MaxSubBatchSize > 0 {
		limit = Options.MaxSubBatchSize
	}
	ver = params.ByName(UrlParamVersion)
	topic = params.ByName(UrlParamTopic)
	hisAppid = params.ByName(UrlParamAppid)
	// auth
	if err = manager.Default.AuthSub(myAppid, r.Header.Get(HttpHeaderSubkey), hisAppid, topic, group); err != nil {
		log.Error("sub[%s/%s] -(%s): {%s.%s.%s UA:%s} %v", myAppid, group, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"), err)
		this.subMetrics.ClientError.Mark(1)
		writeAuthFailure(w, err)
		return
	}
	// fetch the client ack partition and offset
	delayedAck = query.Get("ack") == "1"
	if delayedAck {
		// consumers use explicit acknowledges in order to signal a message as processed successfully
		// if consumers fail to ACK, the message hangs and server will refuse to move ahead
		// get the partitionN and offsetN from client header
		// client will ack with partition=-1, offset=-1:
		// 1. handshake phase
		// 2. when 204 No Content
		partition = r.Header.Get(HttpHeaderPartition)
		offset = r.Header.Get(HttpHeaderOffset)
		if partition != "" && offset != "" {
			// convert partition and offset to int
			offsetN, err = strconv.ParseInt(offset, 10, 64)
			if err != nil {
				log.Error("sub[%s/%s] %s(%s) {%s.%s.%s UA:%s} offset:%s", myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"), offset)
				this.subMetrics.ClientError.Mark(1)
				writeBadRequest(w, "ack with bad offset")
				return
			}
			partitionN, err = strconv.Atoi(partition)
			if err != nil {
				log.Error("sub[%s/%s] %s(%s) {%s.%s.%s UA:%s} partition:%s", myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"), partition)
				this.subMetrics.ClientError.Mark(1)
				writeBadRequest(w, "ack with bad partition")
				return
			}
		} else if len(partition+offset) != 0 {
			log.Error("sub[%s/%s] %s(%s) {%s.%s.%s P:%s O:%s UA:%s} partial ack", myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, partition, offset, r.Header.Get("User-Agent"))
			this.subMetrics.ClientError.Mark(1)
			writeBadRequest(w, "partial ack not allowed")
			return
		}
	}
	shadow = query.Get("q")
	log.Debug("sub[%s/%s] %s(%s) {%s.%s.%s q:%s batch:%d ack:%s P:%s O:%s UA:%s}", myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, shadow, limit, query.Get("ack"), partition, offset, r.Header.Get("User-Agent"))
	if !Options.DisableMetrics {
		this.subMetrics.SubQps.Mark(1)
	}
	// calculate raw topic according to shadow
	if shadow != "" {
		if !sla.ValidateShadowName(shadow) {
			log.Error("sub[%s/%s] %s(%s) {%s.%s.%s q:%s UA:%s} invalid shadow name", myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, shadow, r.Header.Get("User-Agent"))
			this.subMetrics.ClientError.Mark(1)
			writeBadRequest(w, "invalid shadow name")
			return
		}
		if !manager.Default.IsShadowedTopic(hisAppid, topic, ver, myAppid, group) {
			log.Error("sub[%s/%s] %s(%s) {%s.%s.%s q:%s UA:%s} not a shadowed topic", myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, shadow, r.Header.Get("User-Agent"))
			this.subMetrics.ClientError.Mark(1)
			writeBadRequest(w, "register shadow first")
			return
		}
		rawTopic = manager.Default.ShadowTopic(shadow, myAppid, hisAppid, topic, ver, group)
	} else {
		rawTopic = manager.Default.KafkaTopic(hisAppid, topic, ver)
	}
	cluster, found := manager.Default.LookupCluster(hisAppid)
	if !found {
		log.Error("sub[%s/%s] %s(%s) {%s.%s.%s UA:%s} cluster not found", myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"))
		this.subMetrics.ClientError.Mark(1)
		writeBadRequest(w, "invalid appid")
		return
	}
	fetcher, err := store.DefaultSubStore.Fetch(cluster, rawTopic, realGroup, r.RemoteAddr, realIp, reset, Options.PermitStandbySub)
	if err != nil {
		// e.g. kafka was totally shutdown
		// e.g. too many consumers for the same group
		log.Error("sub[%s/%s] -(%s): {%s.%s.%s UA:%s} %v", myAppid, group, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"), err)
		if store.DefaultSubStore.IsSystemError(err) {
			writeServerError(w, err.Error())
		} else {
			this.subMetrics.ClientError.Mark(1)
			if Options.BadGroupRateLimit && !this.throttleBadGroup.Pour(realGroup, 1) {
				writeQuotaExceeded(w)
			} else {
				writeBadRequest(w, err.Error())
			}
		}
		return
	}
	// commit the acked offset
	if delayedAck && partitionN >= 0 && offsetN >= 0 {
		if err = fetcher.CommitUpto(&sarama.ConsumerMessage{
			Topic:     rawTopic,
			Partition: int32(partitionN),
			Offset:    offsetN,
		}); err != nil {
			// during rebalance, this might happen, but with no bad effects
			log.Trace("sub land[%s/%s] %s(%s) {%s/%s ack:1 O:%s UA:%s} %v", myAppid, group, r.RemoteAddr, realIp, rawTopic, partition, offset, r.Header.Get("User-Agent"), err)
		} else {
			log.Debug("sub land[%s/%s] %s(%s) {T:%s/%s, O:%s}", myAppid, group, r.RemoteAddr, realIp, rawTopic, partition, offset)
		}
	}
	var gz *gzip.Writer
	w, gz = gzipWriter(w, r)
	err = this.pumpMessages(w, r, realIp, fetcher, limit, myAppid, hisAppid, topic, ver, group, delayedAck)
	if err != nil {
		// e.g. broken pipe, io timeout, client gone
		// e.g. kafka: error while consuming app1.foobar.v1/0: EOF (kafka was shutdown)
		log.Error("sub[%s/%s] %s(%s) {%s ack:%s P:%s O:%s UA:%s} %v", myAppid, group, r.RemoteAddr, realIp, rawTopic, query.Get("ack"), partition, offset, r.Header.Get("User-Agent"), err)
		if err != ErrClientGone {
			if store.DefaultSubStore.IsSystemError(err) {
				writeServerError(w, err.Error())
			} else {
				this.subMetrics.ClientError.Mark(1)
				if Options.BadGroupRateLimit && !this.throttleBadGroup.Pour(realGroup, 1) {
					writeQuotaExceeded(w)
				} else {
					writeBadRequest(w, err.Error())
				}
			}
		} else if Options.BadGroupRateLimit && !store.DefaultSubStore.IsSystemError(err) {
			this.throttleBadGroup.Pour(realGroup, 1)
		}
		// fetcher.Close might be called by subServer.closedConnCh
		if err = fetcher.Close(); err != nil {
			log.Error("sub[%s/%s] %s(%s) %s %v", myAppid, group, r.RemoteAddr, realIp, rawTopic, err)
		}
	} else if w.Header().Get("Connection") == "close" {
		// max req reached, synchronously close this connection
		if err = fetcher.Close(); err != nil {
			log.Error("sub[%s/%s] %s(%s) %s %v", myAppid, group, r.RemoteAddr, realIp, rawTopic, err)
		}
	}
	if Options.BadGroupRateLimit {
		// record the good consumer group client
		this.goodGroupLock.Lock()
		this.goodGroupClients[r.RemoteAddr] = struct{}{}
		this.goodGroupLock.Unlock()
	}
	if gz != nil {
		gz.Close()
	}
}
func runBuild(args []string) (exit int) {
	if len(args) != 2 {
		stderr("build: Must provide directory and output file")
		return 1
	}
	root := args[0]
	tgt := args[1]
	ext := filepath.Ext(tgt)
	if ext != schema.ACIExtension {
		stderr("build: Extension must be %s (given %s)", schema.ACIExtension, ext)
		return 1
	}
	mode := os.O_CREATE | os.O_WRONLY
	if buildOverwrite {
		mode |= os.O_TRUNC
	} else {
		mode |= os.O_EXCL
	}
	fh, err := os.OpenFile(tgt, mode, 0644)
	if err != nil {
		if os.IsExist(err) {
			stderr("build: Target file exists (try --overwrite)")
		} else {
			stderr("build: Unable to open target %s: %v", tgt, err)
		}
		return 1
	}
	var gw *gzip.Writer
	var r io.WriteCloser = fh
	if !buildNocompress {
		gw = gzip.NewWriter(fh)
		r = gw
	}
	tr := tar.NewWriter(r)
	defer func() {
		tr.Close()
		if !buildNocompress {
			gw.Close()
		}
		fh.Close()
		if exit != 0 && !buildOverwrite {
			os.Remove(tgt)
		}
	}()
	// TODO(jonboulle): stream the validation so we don't have to walk the rootfs twice
	if err := aci.ValidateLayout(root); err != nil {
		stderr("build: Layout failed validation: %v", err)
		return 1
	}
	mpath := filepath.Join(root, aci.ManifestFile)
	b, err := ioutil.ReadFile(mpath)
	if err != nil {
		stderr("build: Unable to read Image Manifest: %v", err)
		return 1
	}
	var im schema.ImageManifest
	if err := im.UnmarshalJSON(b); err != nil {
		stderr("build: Unable to load Image Manifest: %v", err)
		return 1
	}
	iw := aci.NewImageWriter(im, tr)
	err = filepath.Walk(root, aci.BuildWalker(root, iw))
	if err != nil {
		stderr("build: Error walking rootfs: %v", err)
		return 1
	}
	err = iw.Close()
	if err != nil {
		stderr("build: Unable to close image %s: %v", tgt, err)
		return 1
	}
	return
}
//go:generate goannotation $GOFILE
// @rest GET /v1/raw/msgs/:cluster/:topic?group=xx&batch=10&reset=<newest|oldest>
func (this *subServer) subRawHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	var (
		cluster string
		topic   string
		myAppid string
		reset   string
		group   string
		limit   int // max messages to include in the message set
		err     error
	)
	if !Options.DisableMetrics {
		this.subMetrics.SubTryQps.Mark(1)
	}
	query := r.URL.Query()
	group = query.Get("group")
	reset = query.Get("reset")
	realIp := getHttpRemoteIp(r)
	if !manager.Default.ValidateGroupName(r.Header, group) {
		log.Error("sub raw -(%s): illegal group: %s", realIp, group)
		this.subMetrics.ClientError.Mark(1)
		writeBadRequest(w, "illegal group")
		return
	}
	limit, err = getHttpQueryInt(&query, "batch", 1)
	if err != nil {
		log.Error("sub raw -(%s): illegal batch: %v", realIp, err)
		this.subMetrics.ClientError.Mark(1)
		writeBadRequest(w, "illegal batch")
		return
	}
	if limit > Options.MaxSubBatchSize && Options.MaxSubBatchSize > 0 {
		limit = Options.MaxSubBatchSize
	}
	topic = params.ByName(UrlParamTopic)
	cluster = params.ByName("cluster")
	myAppid = r.Header.Get(HttpHeaderAppid)
	log.Debug("sub raw[%s/%s] %s(%s) {%s/%s batch:%d UA:%s}", myAppid, group, r.RemoteAddr, realIp, cluster, topic, limit, r.Header.Get("User-Agent"))
	if !Options.DisableMetrics {
		this.subMetrics.SubQps.Mark(1)
	}
	fetcher, err := store.DefaultSubStore.Fetch(cluster, topic, myAppid+"."+group, r.RemoteAddr, realIp, reset, Options.PermitStandbySub)
	if err != nil {
		// e.g. kafka was totally shutdown
		// e.g. too many consumers for the same group
		log.Error("sub raw[%s/%s] %s(%s) {%s/%s batch:%d UA:%s} %v", myAppid, group, r.RemoteAddr, realIp, cluster, topic, limit, r.Header.Get("User-Agent"), err)
		if store.DefaultSubStore.IsSystemError(err) {
			writeServerError(w, err.Error())
		} else {
			writeBadRequest(w, err.Error())
		}
		return
	}
	var gz *gzip.Writer
	w, gz = gzipWriter(w, r)
	err = this.pumpRawMessages(w, r, realIp, fetcher, limit, myAppid, topic, group)
	if err != nil {
		// e.g. broken pipe, io timeout, client gone
		// e.g. kafka: error while consuming app1.foobar.v1/0: EOF (kafka was shutdown)
		log.Error("sub raw[%s/%s] %s(%s) {%s/%s batch:%d UA:%s} %v", myAppid, group, r.RemoteAddr, realIp, cluster, topic, limit, r.Header.Get("User-Agent"), err)
		if err != ErrClientGone {
			if store.DefaultSubStore.IsSystemError(err) {
				writeServerError(w, err.Error())
			} else {
				writeBadRequest(w, err.Error())
			}
		}
		// fetcher.Close might be called by subServer.closedConnCh
		if err = fetcher.Close(); err != nil {
			log.Error("sub raw[%s/%s] %s(%s) {%s/%s batch:%d UA:%s} %v", myAppid, group, r.RemoteAddr, realIp, cluster, topic, limit, r.Header.Get("User-Agent"), err)
		}
	}
	if gz != nil {
		gz.Close()
	}
}
func NewEventHandler(buildsDB BuildsDB, buildID int) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		flusher := w.(http.Flusher)
		closed := w.(http.CloseNotifier).CloseNotify()

		w.Header().Add("Content-Type", "text/event-stream; charset=utf-8")
		w.Header().Add("Cache-Control", "no-cache, no-store, must-revalidate")
		w.Header().Add("Connection", "keep-alive")
		w.Header().Add(ProtocolVersionHeader, CurrentProtocolVersion)

		var start uint = 0
		if r.Header.Get("Last-Event-ID") != "" {
			_, err := fmt.Sscanf(r.Header.Get("Last-Event-ID"), "%d", &start)
			if err != nil {
				w.WriteHeader(http.StatusBadRequest)
				return
			}
			start++
		}

		var responseWriter io.Writer = w
		var responseFlusher *gzip.Writer

		w.Header().Add("Vary", "Accept-Encoding")
		if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
			w.Header().Set("Content-Encoding", "gzip")
			gz := gzip.NewWriter(w)
			defer gz.Close()
			responseWriter = gz
			responseFlusher = gz
		}

		events, err := buildsDB.GetBuildEvents(buildID, start)
		if err != nil {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		defer events.Close()

		es := make(chan atc.Event)
		errs := make(chan error, 1)

		go func() {
			for {
				ev, err := events.Next()
				if err != nil {
					errs <- err
					return
				} else {
					select {
					case es <- ev:
					case <-closed:
						return
					}
				}
			}
		}()

		for {
			select {
			case ev := <-es:
				payload, err := json.Marshal(event.Message{ev})
				if err != nil {
					return
				}
				err = sse.Event{
					ID:   fmt.Sprintf("%d", start),
					Name: "event",
					Data: payload,
				}.Write(responseWriter)
				if err != nil {
					return
				}
				start++
				if responseFlusher != nil {
					err = responseFlusher.Flush()
					if err != nil {
						return
					}
				}
				flusher.Flush()
			case err := <-errs:
				if err == db.ErrEndOfBuildEventStream {
					err = sse.Event{Name: "end"}.Write(responseWriter)
					if err != nil {
						return
					}
				}
				return
			case <-closed:
				return
			}
		}
	})
}
func main() {
	log.SetFlags(log.LstdFlags | log.Lshortfile)
	tr := &http.Transport{
		ResponseHeaderTimeout: time.Second * 500,
		DisableKeepAlives:     false,
		DisableCompression:    false,
	}
	var wXml *gzip.Writer = nil
	var ww *bufio.Writer = nil
	var xFile *os.File = nil
	meshFile, err2 := os.Create(meshFile)
	if err2 != nil {
		return
	}
	defer meshFile.Close()
	wwMesh := bufio.NewWriter(meshFile)
	wMesh := gzip.NewWriter(wwMesh)
	//w := bufio.NewWriter(file)
	numIdsPerUrl := findNumIdsPerUrl()
	pmids := make([]string, numIdsPerUrl)
	urlFetcher := gopubmed.Fetcher{
		Ssl: false,
		Transport: &http.Transport{
			ResponseHeaderTimeout: time.Second * 500,
			DisableKeepAlives:     false,
			DisableCompression:    false,
			//TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	allCount := 0
	count := 0
	reader, err := makeReader()
	if err != nil {
		log.Fatal(err)
	}
	first := true
	chunkCount := 0
	for {
		thisNumIdsPerUrl := findNumIdsPerUrl()
		if numIdsPerUrl != thisNumIdsPerUrl {
			numIdsPerUrl = thisNumIdsPerUrl
			pmids = make([]string, numIdsPerUrl)
		}
		line, err := reader.ReadString('\n')
		if err != nil {
			// You may check here if err == io.EOF
			break
		}
		line = strings.TrimSpace(line)
		err = lineChecker(line)
		if err != nil {
			log.Fatal(err)
		}
		//log.Println(line)
		pmids[count] = line
		if wXml == nil {
			wXml, ww, xFile = makeXmlWriter(allCount, pmids[0])
		}
		count = count + 1
		// Collected enough pmids: get their XML from NIH
		if count == numIdsPerUrl {
			getPubmedRecords(&urlFetcher, first, wMesh, wXml, tr, pmids)
			checkTime()
			first = false
			count = 0
			zeroArray(pmids)
		}
		allCount += 1
		chunkCount += 1
		// Start new xml file: close old one, open new one
		if chunkCount > recordsPerFile {
			fmt.Fprintln(wXml, endPubmedArticleSet)
			wXml.Flush()
			wXml.Close()
			ww.Flush()
			wXml, ww, xFile = makeXmlWriter(allCount, pmids[0])
			chunkCount = 0
			first = true
		}
		if allCount%500 == 0 {
			log.Println(allCount)
		}
	}
	if count != 0 {
		getPubmedRecords(&urlFetcher, first, wMesh, wXml, tr, pmids)
	}
	fmt.Fprintln(wXml, endPubmedArticleSet)
	wXml.Flush()
	wXml.Close()
	ww.Flush()
	wMesh.Flush()
	wwMesh.Flush()
	wMesh.Close()
	xFile.Close()
}
// ReturnWriter returns a gzip.Writer to the pool so that it can
// later be reused via GetWriter.
// Don't close the writer; Close will be called before returning
// it to the pool.
func (pool *GzipPool) ReturnWriter(writer *gzip.Writer) {
	writer.Close()
	pool.pool.Put(writer)
}
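The GetWriter counterpart referenced by the comment above is not shown in this snippet. A minimal sketch, assuming pool.pool is a sync.Pool; the real GzipPool.GetWriter may differ:

// Sketch only: fetch a writer from the pool, or allocate one on a miss.
func (pool *GzipPool) GetWriter(dst io.Writer) (writer *gzip.Writer) {
	if w := pool.pool.Get(); w != nil {
		writer = w.(*gzip.Writer)
		writer.Reset(dst) // reuse the pooled writer against the new destination
	} else {
		writer = gzip.NewWriter(dst)
	}
	return writer
}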
func createACI(dir string, imageName string) error {
	var errStr string
	var errRes error
	buildNocompress := true
	root := dir
	tgt := imageName
	ext := filepath.Ext(tgt)
	if ext != schema.ACIExtension {
		errStr = fmt.Sprintf("build: Extension must be %s (given %s)", schema.ACIExtension, ext)
		errRes = errors.New(errStr)
		return errRes
	}
	if err := aci.ValidateLayout(root); err != nil {
		if e, ok := err.(aci.ErrOldVersion); ok {
			if globalFlags.Debug {
				stderr("build: Warning: %v. Please update your manifest.", e)
			}
		} else {
			errStr = fmt.Sprintf("build: Layout failed validation: %v", err)
			errRes = errors.New(errStr)
			return errRes
		}
	}
	mode := os.O_CREATE | os.O_WRONLY | os.O_TRUNC
	fh, err := os.OpenFile(tgt, mode, 0644)
	if err != nil {
		errStr = fmt.Sprintf("build: Unable to open target %s: %v", tgt, err)
		errRes = errors.New(errStr)
		return errRes
	}
	var gw *gzip.Writer
	var r io.WriteCloser = fh
	if !buildNocompress {
		gw = gzip.NewWriter(fh)
		r = gw
	}
	tr := tar.NewWriter(r)
	defer func() {
		tr.Close()
		if !buildNocompress {
			gw.Close()
		}
		fh.Close()
	}()
	mpath := filepath.Join(root, aci.ManifestFile)
	b, err := ioutil.ReadFile(mpath)
	if err != nil {
		errStr = fmt.Sprintf("build: Unable to read Image Manifest: %v", err)
		errRes = errors.New(errStr)
		return errRes
	}
	var im schema.ImageManifest
	if err := im.UnmarshalJSON(b); err != nil {
		errStr = fmt.Sprintf("build: Unable to load Image Manifest: %v", err)
		errRes = errors.New(errStr)
		return errRes
	}
	iw := aci.NewImageWriter(im, tr)
	err = filepath.Walk(root, aci.BuildWalker(root, iw))
	if err != nil {
		errStr = fmt.Sprintf("build: Error walking rootfs: %v", err)
		errRes = errors.New(errStr)
		return errRes
	}
	err = iw.Close()
	if err != nil {
		errStr = fmt.Sprintf("build: Unable to close image %s: %v", tgt, err)
		errRes = errors.New(errStr)
		return errRes
	}
	return nil
}
// BuildACI takes an input directory that conforms to the ACI specification,
// and outputs an optionally compressed ACI image.
func BuildACI(root string, tgt string, overwrite bool, nocompress bool) (ret error) {
	ext := filepath.Ext(tgt)
	if ext != schema.ACIExtension {
		ret = fmt.Errorf("build: Extension must be %s (given %s)", schema.ACIExtension, ext)
		return
	}
	mode := os.O_CREATE | os.O_WRONLY
	if overwrite {
		mode |= os.O_TRUNC
	} else {
		mode |= os.O_EXCL
	}
	fh, err := os.OpenFile(tgt, mode, 0644)
	if err != nil {
		if os.IsExist(err) {
			ret = fmt.Errorf("build: Target file exists")
		} else {
			ret = fmt.Errorf("build: Unable to open target %s: %v", tgt, err)
		}
		return
	}
	var gw *gzip.Writer
	var r io.WriteCloser = fh
	if !nocompress {
		gw = gzip.NewWriter(fh)
		r = gw
	}
	tr := tar.NewWriter(r)
	defer func() {
		tr.Close()
		if !nocompress {
			gw.Close()
		}
		fh.Close()
		if ret != nil && !overwrite {
			os.Remove(tgt)
		}
	}()
	// TODO(jonboulle): stream the validation so we don't have to walk the rootfs twice
	if err := aci.ValidateLayout(root); err != nil {
		ret = fmt.Errorf("build: Layout failed validation: %v", err)
		return
	}
	mpath := filepath.Join(root, aci.ManifestFile)
	b, err := ioutil.ReadFile(mpath)
	if err != nil {
		ret = fmt.Errorf("build: Unable to read Image Manifest: %v", err)
		return
	}
	var im schema.ImageManifest
	if err := im.UnmarshalJSON(b); err != nil {
		ret = fmt.Errorf("build: Unable to load Image Manifest: %v", err)
		return
	}
	iw := aci.NewImageWriter(im, tr)
	err = filepath.Walk(root, aci.BuildWalker(root, iw))
	if err != nil {
		ret = fmt.Errorf("build: Error walking rootfs: %v", err)
		return
	}
	err = iw.Close()
	if err != nil {
		ret = fmt.Errorf("build: Unable to close image %s: %v", tgt, err)
		return
	}
	return nil
}
func (this *ResultView) Publish(ctxt *web.Context) (err error) {
	names := mvc.GetMvcMeta(ctxt)
	if names[mvc.MVC_ACTION] == "" {
		names[mvc.MVC_ACTION] = "_"
	}
	var tmpl *template.Template
	var ext string
	/*
		var mc = memcache.New("127.0.0.1:11211")
		var item *memcache.Item
		if item, err = mc.Get(ctxt.Request.RequestURI); err == nil {
			ctxt.SetHeader("Content-Type", GetContentType(ext))
			if ctxt.ReqHeaderHas("Accept-Encoding", "gzip") {
				ctxt.SetHeader("Content-Encoding", "gzip")
			}
			ctxt.Response.Write(item.Value)
			return
		}
	*/
	tmpl, ext, err = this.getTmpl(names)
	if err == nil {
		var isjsonp bool
		var jsonp string
		var method = ctxt.Method()
		if ext == ".jsonp" {
			jsonp = ctxt.RequestValue("callback")
			if jsonp == "" {
				jsonp = ctxt.RequestValue("jsonp")
			}
			if (method == "GET" || method == "HEAD") && jsonp != "" && ext == ".jsonp" && validate.IsJSONPCallback(jsonp) {
				ctxt.SetHeader("Content-Type", "application/javascript")
				ctxt.SetHeader("Content-Disposition", "attachment; filename=jsonp.jsonp")
				ctxt.SetHeader("X-Content-Type-Options", "nosniff")
				isjsonp = true
			} else {
				err = errors.New("Invalid jsonp callback")
				log.Println(err)
				ctxt.SetErrorCode(403)
				return
			}
		} else {
			ctxt.SetHeader("Content-Type", GetContentType(ext))
		}
		if method != "HEAD" {
			var err error
			var b *bytes.Buffer = bytes.NewBuffer(make([]byte, 0, 5120))
			var tw io.Writer = io.MultiWriter(ctxt.Response, b)
			var gzipwriter *gzip.Writer
			if ctxt.ReqHeaderHas("Accept-Encoding", "gzip") {
				ctxt.SetHeader("Content-Encoding", "gzip")
				gzipwriter, _ = gzip.NewWriterLevel(tw, gzip.BestSpeed)
				tw = gzipwriter
			}
			ctxt.SetHeader("Vary", "Accept-Encoding")
			ctxt.Response.WriteHeader(200)
			if isjsonp {
				writeJsonpStart(jsonp, tw)
			}
			err = tmpl.Execute(tw, this.VM)
			if err != nil {
				// Header already sent... multiple write headers
				//panic(err)
				log.Println(err)
			}
			if isjsonp {
				writeJsonpEnd(jsonp, tw)
			}
			if gzipwriter != nil {
				gzipwriter.Close()
			}
			//mc.Set(&memcache.Item{Key: ctxt.Request.RequestURI, Value: b.Bytes(), Expiration: 3600})
			if flushw, ok := ctxt.RootResponse().(http.Flusher); ok {
				flushw.Flush()
			}
		} else {
			ctxt.Response.WriteHeader(200)
		}
	} else {
		log.Println(err)
		ctxt.SetErrorCode(500)
	}
	return
}
// PutWriter closes and returns a gzip.Writer to the pool
// so that it can be reused via GetWriter.
func (pool *GzipPool) PutWriter(writer *gzip.Writer) {
	writer.Close()
	pool.writers.Put(writer)
}
func runPatchManifest(args []string) (exit int) {
	var fh *os.File
	var err error

	if patchReplace && patchOverwrite {
		stderr("patch-manifest: Cannot use both --replace and --overwrite")
		return 1
	}
	if !patchReplace && len(args) != 2 {
		stderr("patch-manifest: Must provide input and output files (or use --replace)")
		return 1
	}
	if patchReplace && len(args) != 1 {
		stderr("patch-manifest: Must provide one file")
		return 1
	}
	if patchManifestFile != "" && (patchName != "" || patchExec != "" || patchUser != "" || patchGroup != "" || patchCaps != "" || patchMounts != "") {
		stderr("patch-manifest: --manifest is incompatible with other manifest editing options")
		return 1
	}
	inputFile = args[0]

	// Prepare output writer
	if patchReplace {
		fh, err = ioutil.TempFile(path.Dir(inputFile), ".actool-tmp."+path.Base(inputFile)+"-")
		if err != nil {
			stderr("patch-manifest: Cannot create temporary file: %v", err)
			return 1
		}
	} else {
		outputFile = args[1]
		ext := filepath.Ext(outputFile)
		if ext != schema.ACIExtension {
			stderr("patch-manifest: Extension must be %s (given %s)", schema.ACIExtension, ext)
			return 1
		}
		mode := os.O_CREATE | os.O_WRONLY
		if patchOverwrite {
			mode |= os.O_TRUNC
		} else {
			mode |= os.O_EXCL
		}
		fh, err = os.OpenFile(outputFile, mode, 0644)
		if err != nil {
			if os.IsExist(err) {
				stderr("patch-manifest: Output file exists (try --overwrite)")
			} else {
				stderr("patch-manifest: Unable to open output %s: %v", outputFile, err)
			}
			return 1
		}
	}
	var gw *gzip.Writer
	var w io.WriteCloser = fh
	if !patchNocompress {
		gw = gzip.NewWriter(fh)
		w = gw
	}
	tw := tar.NewWriter(w)
	defer func() {
		tw.Close()
		if !patchNocompress {
			gw.Close()
		}
		fh.Close()
		if exit != 0 && !patchOverwrite {
			os.Remove(fh.Name())
		}
	}()

	// Prepare input reader
	input, err := os.Open(inputFile)
	if err != nil {
		stderr("patch-manifest: Cannot open %s: %v", inputFile, err)
		return 1
	}
	defer input.Close()

	tr, err := aci.NewCompressedTarReader(input)
	if err != nil {
		stderr("patch-manifest: Cannot extract %s: %v", inputFile, err)
		return 1
	}
	defer tr.Close()

	var newManifest []byte
	if patchManifestFile != "" {
		mr, err := os.Open(patchManifestFile)
		if err != nil {
			stderr("patch-manifest: Cannot open %s: %v", patchManifestFile, err)
			return 1
		}
		defer mr.Close()
		newManifest, err = ioutil.ReadAll(mr)
		if err != nil {
			stderr("patch-manifest: Cannot read %s: %v", patchManifestFile, err)
			return 1
		}
	}

	err = extractManifest(tr.Reader, tw, false, newManifest)
	if err != nil {
		stderr("patch-manifest: Unable to read %s: %v", inputFile, err)
		return 1
	}

	if patchReplace {
		err = os.Rename(fh.Name(), inputFile)
		if err != nil {
			stderr("patch-manifest: Cannot rename %q to %q: %v", fh.Name(), inputFile, err)
			return 1
		}
	}
	return
}
func (svc *Service) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	var match []string
	var parms []string
	var authparms map[string]interface{}
	var i, j int
	var endpoint UrlNode
	var resp Response
	var ok bool
	var err error
	var mrsh Marshaler
	var umrsh Unmarshaler
	var outWriter io.Writer
	var encRequest []string
	var enc, e string
	var gzw *gzip.Writer
	var header string
	var qry string
	var buf []byte
	var httpstat int
	var authinfo interface{}

	Goose.Serve.Logf(1, "Access %s from %s", r.URL.Path, r.RemoteAddr)
	if r.URL.Path == "/crtlogin" {
		w.WriteHeader(http.StatusOK)
		w.Write([]byte(" "))
		return
	}
	hd := w.Header()
	hd.Add("Access-Control-Allow-Origin", "*")
	hd.Add("Vary", "Origin")
	Goose.Serve.Logf(6, "Will check if swagger.json is requested: %#v", svc.Swagger)
	if r.URL.Path == "/swagger.json" {
		defer func() {
			if r := recover(); r != nil {
				Goose.Serve.Logf(1, "Internal server error writing response body for swagger.json: %#v", r)
			}
		}()
		hd.Add("Content-Type", "application/json")
		// w.WriteHeader(http.StatusOK)
		Goose.Serve.Logf(6, "Received request of swagger.json: %#v", svc.Swagger)
		// mrsh = json.NewEncoder(w)
		// err = mrsh.Encode(svc.Swagger)
		buf, err = json.Marshal(svc.Swagger)
		if err != nil {
			Goose.Serve.Logf(1, "Internal server error marshaling swagger.json: %s", err)
		}
		hd.Add("Content-Length", fmt.Sprintf("%d", len(buf)))
		_, err = io.WriteString(w, string(buf))
		if err != nil {
			Goose.Serve.Logf(1, "Internal server error writing response body for swagger.json: %s", err)
		}
		return
	}
	match = svc.Matcher.FindStringSubmatch(r.Method + ":" + r.URL.Path)
	Goose.Serve.Logf(6, "Matcher found this %#v\n", match)
	if len(match) == 0 {
		Goose.Serve.Logf(1, "Invalid service handler "+r.URL.Path)
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte("Invalid service handler " + r.URL.Path))
		return
	}
	parms = []string{}
	authparms = map[string]interface{}{}
	// for _, endpoint = range svc.Svc {
	for i = 1; i < len(match); i++ {
		Goose.Serve.Logf(5, "trying %s:%s with endpoint: %s", r.Method, r.URL.Path, svc.Svc[svc.MatchedOps[i-1]].Path)
		if len(match[i]) > 0 {
			Goose.Serve.Logf(5, "Found endpoint %s for: %s", svc.Svc[svc.MatchedOps[i-1]].Path, r.URL.Path)
			endpoint = svc.Svc[svc.MatchedOps[i-1]]
			for j = i + 1; (j < len(match)) && (len(match[j]) > 0); j++ {
				authparms[endpoint.ParmNames[j-i-1]] = match[j]
			}
			parms = match[i+1 : j]
			j -= i + 1
			break
		}
	}
	Goose.Serve.Logf(5, "Original parms: %#v", parms)
	if r.Method == "OPTIONS" {
		Goose.Serve.Logf(4, "CORS Options called on "+r.URL.Path)
		hd.Add("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
		//Access-Control-Allow-Origin: http://foo.example
		//Access-Control-Allow-Methods: POST, GET, OPTIONS
		//Access-Control-Allow-Headers: X-PINGOTHER
		//Access-Control-Allow-Origin: *
		hd.Add("Access-Control-Allow-Headers", strings.Join(endpoint.Headers, ", "))
		w.WriteHeader(http.StatusOK)
		w.Write([]byte("OK"))
		return
	}
	if len(endpoint.Query) > 0 {
		r.ParseForm()
		for _, qry = range endpoint.Query {
			if _, ok := r.Form[qry]; !ok {
				Goose.Serve.Logf(1, "%s: %s", ErrorMissingRequiredQueryField, qry)
				return
			}
			parms = append(parms, r.Form[qry][0]) // TODO array support
			authparms[endpoint.ParmNames[j]] = r.Form[qry][0]
			j++
		}
	}
	Goose.Serve.Logf(5, "Parms with query: %#v", parms)
	for _, header = range endpoint.Headers {
		if (r.Header[header] == nil) || (len(r.Header[header]) == 0) {
			Goose.Serve.Logf(1, "%s: %s", ErrorMissingRequiredHTTPHeader, header)
			Goose.Serve.Logf(6, "HTTP Headers found: %#v", r.Header)
			return
		}
		parms = append(parms, r.Header[header][0]) // TODO array support
		authparms[endpoint.ParmNames[j]] = r.Header[header][0]
		j++
	}
	Goose.Serve.Logf(5, "Parms with headers: %#v", parms)
	Goose.Serve.Logf(5, "checking marshalers: %s, %s", endpoint.consumes, endpoint.produces)
	if endpoint.consumes == "application/json" {
		umrsh = json.NewDecoder(r.Body)
	} else if endpoint.consumes == "application/xml" {
		umrsh = xml.NewDecoder(r.Body)
	} else if endpoint.consumes == "multipart/form-data" {
		// bdy, err = ioutil.ReadAll(r.Body)
		// ioutil.WriteFile("upload.bin", bdy, 0600)
		// Goose.Serve.Logf(6,"body=%s",bdy)
		umrsh, err = NewMultipartUnmarshaler(r, endpoint.Body)
		if err != nil {
			Goose.Serve.Logf(1, "Error initializing multipart/formdata unmarshaller for %s: %s", r.URL.Path, err)
			return
		}
	}
	Goose.Serve.Logf(6, "umrsh=%#v", umrsh)
	outWriter = w
	if encRequest, ok = r.Header["Accept-Encoding"]; ok {
		Goose.Serve.Logf(6, "Accept-Encoding: %#v", encRequest)
		if svc.AllowGzip == true {
			Goose.Serve.Logf(5, "svc.AllowGzip == true")
		gzipcheck:
			for _, enc = range encRequest {
				for _, e = range strings.Split(enc, ", ") {
					Goose.Serve.Logf(5, "Encoding: %s", e)
					if e == "gzip" {
						Goose.Serve.Logf(5, "Using gzip")
						gzw = gzip.NewWriter(w)
						outWriter = gzHttpResponseWriter{
							Writer:         gzw,
							ResponseWriter: w,
						}
						defer gzw.Close()
						hd.Add("Vary", "Accept-Encoding")
						hd.Add("Content-Encoding", "gzip")
						break gzipcheck
					}
				}
			}
		}
	}
	if endpoint.produces == "application/json" {
		mrsh = json.NewEncoder(outWriter)
		hd.Add("Content-Type", "application/json")
	} else if endpoint.produces == "application/xml" {
		mrsh = xml.NewEncoder(outWriter)
		hd.Add("Content-Type", "application/xml")
	}
	Goose.Serve.Logf(5, "svc.Access: %d", svc.Access)
	err = nil
	if svc.Access != AccessNone {
		httpstat, authinfo, err = svc.Authorizer.Authorize(endpoint.Path, authparms, r.RemoteAddr, r.TLS, svc.SavePending)
	}
	if err == nil {
		Goose.Serve.Logf(5, "Authorization returned HTTP status %d", httpstat)
		if svc.Access == AccessAuthInfo || svc.Access == AccessVerifyAuthInfo {
			resp = endpoint.Handle(parms, umrsh, authinfo)
		} else {
			resp = endpoint.Handle(parms, umrsh, nil)
		}
		if resp.Status != 0 {
			for k, v := range resp.Header {
				hd.Add(k, v)
			}
			w.WriteHeader(resp.Status)
		}
		if resp.Status != http.StatusNoContent {
			err = mrsh.Encode(resp.Body)
			if err != nil {
				Goose.Serve.Logf(1, "Internal server error writing response body (no status sent to client): %s", err)
				return
			}
		}
	} else {
		Goose.Serve.Logf(1, "Authorization failure with HTTP Status %d and error %s", httpstat, err)
		w.WriteHeader(httpstat)
		if httpstat != http.StatusNoContent {
			err = mrsh.Encode(fmt.Sprintf("%s", err))
			if err != nil {
				Goose.Serve.Logf(1, "Internal server error writing response body (no status sent to client): %s", err)
				return
			}
		}
	}
}