func main() {
	filecsv := `C:\Users\yejianfeng\Desktop\mxm\skill.csv`
	file, err := os.Open(filecsv)
	if err != nil {
		panic(err)
	}
	defer file.Close()
	reader := csv.NewReader(file)
	roleFolder := `C:\Users\yejianfeng\Desktop\mxm\skill\`
	// Skip the header row.
	reader.Read()
	for {
		fields, err := reader.Read()
		if err == io.EOF {
			break
		}
		// Print the first field of any row whose image file (.jpg or .png) is missing.
		picurl := fields[15]
		jpg := roleFolder + picurl + ".jpg"
		if f, err := os.Open(jpg); err == nil {
			f.Close()
			continue
		}
		png := roleFolder + picurl + ".png"
		if f, err := os.Open(png); err == nil {
			f.Close()
			continue
		}
		fmt.Println(fields[0])
	}
}
// ReadDir returns a Dir representing an expanded charm directory.
func ReadDir(path string) (dir *Dir, err error) {
	dir = &Dir{Path: path}
	file, err := os.Open(dir.join("metadata.yaml"))
	if err != nil {
		return nil, err
	}
	dir.meta, err = ReadMeta(file)
	file.Close()
	if err != nil {
		return nil, err
	}
	file, err = os.Open(dir.join("config.yaml"))
	if _, ok := err.(*os.PathError); ok {
		dir.config = NewConfig()
	} else if err != nil {
		return nil, err
	} else {
		dir.config, err = ReadConfig(file)
		file.Close()
		if err != nil {
			return nil, err
		}
	}
	if file, err = os.Open(dir.join("revision")); err == nil {
		_, err = fmt.Fscan(file, &dir.revision)
		file.Close()
		if err != nil {
			return nil, errors.New("invalid revision file")
		}
	} else {
		dir.revision = dir.meta.OldRevision
	}
	return dir, nil
}
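// A hedged usage sketch (the charm path is an assumption, not from the original
// source): ReadDir requires metadata.yaml to be present; config.yaml and the
// revision file are optional and fall back to defaults as shown above.
func exampleReadCharmDir() {
	dir, err := ReadDir("/var/lib/juju/charms/wordpress")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(dir.Path)
}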
// ScanPlugins scans path for plugins: subdirectories that contain
// files with the given suffix.
func ScanPlugins(path string, suffix string) []*Plugin {
	var plugins []*Plugin
	f, err := os.Open(path)
	if err != nil {
		log.Warn(err)
		return nil
	}
	defer f.Close()
	dirs, err := f.Readdirnames(-1)
	if err != nil {
		log.Warn(err)
		return nil
	}
	for _, dir := range dirs {
		dir2 := pt.Join(path, dir)
		f2, err := os.Open(dir2)
		if err != nil {
			log.Warn(err)
			continue
		}
		fi, err := f2.Readdir(-1)
		// Close inside the loop rather than defer, so handles are not held
		// open until ScanPlugins returns.
		f2.Close()
		if err != nil {
			continue
		}
		for _, f := range fi {
			fn := f.Name()
			if strings.HasSuffix(fn, suffix) {
				plugins = append(plugins, NewPlugin(dir2, suffix))
				break
			}
		}
	}
	return plugins
}
func Load() Config {
	file, err := os.Open(Path())
	var wConfig Config
	if err != nil {
		log.Printf("Config file not found. Creating new...")
		err = CreateNewConfigFile()
		if err != nil {
			log.Fatalf("Unable to create new config file. Error: %v", err)
		}
		file, err = os.Open(Path())
		if err != nil {
			log.Fatalf("Unable to read new config file. Error: %v", err)
		}
	}
	defer file.Close()
	if _, err := toml.DecodeReader(file, &wConfig); err != nil {
		log.Fatalf("Unable to decode config file. Error: %v", err)
	}
	return wConfig
}
func (c *Conn) cget(name string) (data []byte, err error) {
	cache := filepath.Join(c.cache, name)
	f, err := os.Open(cache)
	if err == nil {
		defer f.Close()
		return ioutil.ReadAll(f)
	}
	if altCache := c.altCachePath(name); altCache != "" {
		f, err := os.Open(altCache)
		if err == nil {
			defer f.Close()
			return ioutil.ReadAll(f)
		}
	}
	data, err = c.bget(name)
	if err != nil {
		return nil, err
	}
	dir, _ := filepath.Split(cache)
	os.MkdirAll(dir, 0700)
	ioutil.WriteFile(cache, data, 0600)
	return data, nil
}
func LoadManual(version string, funcname string) (io.ReadCloser, error) {
	pagename := version + ":" + funcname
	pagename = strings.Replace(pagename, ".", "-", -1)
	pagename = strings.Replace(pagename, "_", "-", -1)
	cachepath := path.Join(cachedir, pagename)
	cachefile, err := os.Open(cachepath)
	if err == nil {
		return cachefile, nil
	}
	os.MkdirAll(cachedir, 0755)
	url := "http://api.zeromq.org/" + pagename
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("%s -> %s", url, resp.Status)
	}
	if cachefile, err = os.Create(cachepath); err != nil {
		return nil, err
	}
	_, err = io.Copy(cachefile, resp.Body)
	// Close the file we just wrote before handing back a fresh read-only handle.
	cachefile.Close()
	if err != nil {
		return nil, err
	}
	return os.Open(cachepath)
}
func renameRelation(c *gc.C, charmPath, oldName, newName string) {
	path := filepath.Join(charmPath, "metadata.yaml")
	f, err := os.Open(path)
	c.Assert(err, jc.ErrorIsNil)
	defer f.Close()
	meta, err := corecharm.ReadMeta(f)
	c.Assert(err, jc.ErrorIsNil)

	replace := func(what map[string]corecharm.Relation) bool {
		for relName, relation := range what {
			if relName == oldName {
				what[newName] = relation
				delete(what, oldName)
				return true
			}
		}
		return false
	}
	replaced := replace(meta.Provides) || replace(meta.Requires) || replace(meta.Peers)
	c.Assert(replaced, gc.Equals, true, gc.Commentf("charm %q does not implement relation %q", charmPath, oldName))

	newmeta, err := goyaml.Marshal(meta)
	c.Assert(err, jc.ErrorIsNil)
	ioutil.WriteFile(path, newmeta, 0644)

	f, err = os.Open(path)
	c.Assert(err, jc.ErrorIsNil)
	defer f.Close()
	_, err = corecharm.ReadMeta(f)
	c.Assert(err, jc.ErrorIsNil)
}
func (f FileMigrationSource) FindMigrations() ([]*Migration, error) {
	migrations := make([]*Migration, 0)

	file, err := os.Open(f.Dir)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	files, err := file.Readdir(0)
	if err != nil {
		return nil, err
	}

	for _, info := range files {
		if strings.HasSuffix(info.Name(), ".sql") {
			file, err := os.Open(path.Join(f.Dir, info.Name()))
			if err != nil {
				return nil, err
			}
			migration, err := ParseMigration(info.Name(), file)
			file.Close()
			if err != nil {
				return nil, err
			}
			migrations = append(migrations, migration)
		}
	}

	// Make sure migrations are sorted
	sort.Sort(byId(migrations))

	return migrations, nil
}
func InitializeConfig() {
	var err error
	var out io.Writer
	var erw io.Writer

	do = definitions.NowDo()

	if os.Getenv("ERIS_CLI_WRITER") != "" {
		out, err = os.Open(os.Getenv("ERIS_CLI_WRITER"))
		if err != nil {
			fmt.Printf("Could not open: %s\n", err)
			return
		}
	} else {
		out = os.Stdout
	}

	if os.Getenv("ERIS_CLI_ERROR_WRITER") != "" {
		erw, err = os.Open(os.Getenv("ERIS_CLI_ERROR_WRITER"))
		if err != nil {
			fmt.Printf("Could not open: %s\n", err)
			return
		}
	} else {
		erw = os.Stderr
	}

	config.GlobalConfig, err = config.SetGlobalObject(out, erw)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}
func MergeColumnsWithChannel() {
	ch1 := make(chan string, 256)
	ch2 := make(chan string, 256)

	f1, err := os.Open(COL1_FILENAME)
	if err != nil {
		log.Fatalln("open col1.txt error:", err)
	}
	defer f1.Close()

	f2, err := os.Open(COL2_FILENAME)
	if err != nil {
		log.Fatalln("open col2.txt error:", err)
	}
	defer f2.Close()

	mf, err := os.Create(MERGED_FILENAME)
	if err != nil {
		log.Fatalln("create merged.txt error:", err)
	}
	defer mf.Close()

	go ScanFile(f1, ch1)
	go ScanFile(f2, ch2)

	for {
		c1, ok1 := <-ch1
		if !ok1 {
			break
		}
		c2, ok2 := <-ch2
		if !ok2 {
			break
		}
		fmt.Fprintf(mf, "%s\t%s\n", c1, c2)
	}
}
func MergeColumns() {
	f1, err := os.Open(COL1_FILENAME)
	if err != nil {
		log.Fatalln("open col1.txt error:", err)
	}
	defer f1.Close()
	s1 := bufio.NewScanner(f1)

	f2, err := os.Open(COL2_FILENAME)
	if err != nil {
		log.Fatalln("open col2.txt error:", err)
	}
	defer f2.Close()
	s2 := bufio.NewScanner(f2)

	mf, err := os.Create(MERGED_FILENAME)
	if err != nil {
		log.Fatalln("create merged.txt error:", err)
	}
	defer mf.Close()

	for s1.Scan() && s2.Scan() {
		c1 := s1.Text()
		c2 := s2.Text()
		fmt.Fprintf(mf, "%s\t%s\n", c1, c2)
	}
}
// #include processing.
func (in *Input) include() {
	// Find and parse string.
	tok := in.Stack.Next()
	if tok != scanner.String {
		in.expectText("expected string after #include")
	}
	name, err := strconv.Unquote(in.Stack.Text())
	if err != nil {
		in.Error("unquoting include file name: ", err)
	}
	in.expectNewline("#include")
	// Push tokenizer for file onto stack.
	fd, err := os.Open(name)
	if err != nil {
		for _, dir := range in.includes {
			fd, err = os.Open(filepath.Join(dir, name))
			if err == nil {
				break
			}
		}
		if err != nil {
			in.Error("#include:", err)
		}
	}
	in.Push(NewTokenizer(name, fd, fd))
}
func getfulllist() {
	fullList = make(map[string]bool)
	d, err := os.Open(storage)
	if err != nil {
		return
	}
	defer d.Close()
	fid, err := d.Readdir(0)
	if err != nil {
		log.Fatalln(err)
	}
	for _, fi := range fid {
		if fi.Mode().IsDir() {
			// Skip directories whose names are not positive integers.
			if curdir, _ := strconv.Atoi(fi.Name()); curdir == 0 {
				continue
			}
			// Open the directory only to verify it is accessible, then walk it.
			dtbl, err := os.Open(filepath.Join(storage, fi.Name()))
			if err != nil {
				log.Fatalln(err)
			}
			dtbl.Close()
			walkfunc := func(path string, f os.FileInfo, err error) error {
				if f.Mode().IsRegular() {
					fullList[path] = true
				}
				return nil
			}
			filepath.Walk(filepath.Join(storage, fi.Name()), walkfunc)
		}
	}
}
func (f *QemuLogFile) Read(p []byte) (n int, err error) {
	reader, err := os.Open(f.Name)
	if err != nil {
		return 0, err
	}
	reader.Seek(f.Offset, os.SEEK_SET)
	for {
		n, err = reader.Read(p)
		f.Offset += int64(n)
		if err == io.EOF {
			if f.eof {
				reader.Close()
				return
			}
			// Wait for more output, then reopen the file and resume at the saved offset.
			time.Sleep(1 * time.Second)
			reader.Close()
			reader, err = os.Open(f.Name)
			if err != nil {
				return
			}
			reader.Seek(f.Offset, os.SEEK_SET)
		}
		if err != nil || n != 0 {
			reader.Close()
			return
		}
	}
}
func TtyReady() error {
	var err error
	_stdin, err := os.Open("CONIN$")
	if err != nil {
		return err
	}
	_stdout, err := os.Open("CONOUT$")
	if err != nil {
		return err
	}
	stdin = os.Stdin
	stdout = os.Stdout
	os.Stdin = _stdin
	os.Stdout = _stdout
	syscall.Stdin = syscall.Handle(os.Stdin.Fd())
	err = setStdHandle(syscall.STD_INPUT_HANDLE, syscall.Stdin)
	if err != nil {
		return err
	}
	syscall.Stdout = syscall.Handle(os.Stdout.Fd())
	err = setStdHandle(syscall.STD_OUTPUT_HANDLE, syscall.Stdout)
	if err != nil {
		return err
	}
	return nil
}
// HttpServerFromFiles starts an HTTP server pretending to be the DSP API.
//
// The server answers on /Station() with the content of the file stationsFilename
// and on /Queue() - ignoring query parameters - with the content of the file queueFilename.
func HttpServerFromFiles(stationsFilename string, queueFilename string, port int) {
	router := mux.NewRouter()
	router.HandleFunc("/Station()", func(w http.ResponseWriter, r *http.Request) {
		logger.Println("Serving stations")
		stations, err := os.Open(stationsFilename)
		if err != nil {
			logger.Println("no stations file found")
			http.Error(w, "no stations file found", http.StatusInternalServerError)
			return
		}
		defer stations.Close()
		w.Header().Set("Content-Type", "Application/JSON; charset=utf-8")
		io.Copy(w, stations)
	})
	router.HandleFunc("/Queue()", func(w http.ResponseWriter, r *http.Request) {
		logger.Println("Serving queue")
		queue, err := os.Open(queueFilename)
		if err != nil {
			logger.Println("no queue file found")
			http.Error(w, "no queue file found", http.StatusInternalServerError)
			return
		}
		defer queue.Close()
		w.Header().Set("Content-Type", "Application/JSON; charset=utf-8")
		io.Copy(w, queue)
	})
	go func() {
		logger.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", port), router))
	}()
	logger.Printf("Started server on port %d", port)
}
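// A minimal usage sketch (the file names and port below are assumptions, not from
// the original source): HttpServerFromFiles returns immediately because the listener
// runs in its own goroutine, so a caller that only runs the stub must block itself.
func exampleRunStubServer() {
	HttpServerFromFiles("testdata/stations.json", "testdata/queue.json", 8080)
	select {} // keep the process alive while the stub keeps serving requests
}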
// Check that reading large files doesn't lead to large allocations.
func TestReadLargeMemCheck(t *testing.T) {
	ts := NewTestCase(t)
	defer ts.Cleanup()

	content := RandomData(385 * 1023)
	err := ioutil.WriteFile(ts.origFile, []byte(content), 0644)
	if err != nil {
		t.Fatalf("WriteFile failed: %v", err)
	}

	f, err := os.Open(ts.mountFile)
	if err != nil {
		t.Fatalf("Open failed: %v", err)
	}
	buf := make([]byte, len(content)+1024)
	_, err = f.Read(buf)
	if err != nil {
		t.Fatalf("Read failed: %v", err)
	}
	f.Close()
	runtime.GC()

	var before, after runtime.MemStats
	N := 100
	runtime.ReadMemStats(&before)
	for i := 0; i < N; i++ {
		f, _ := os.Open(ts.mountFile)
		f.Read(buf)
		f.Close()
	}
	runtime.ReadMemStats(&after)
	delta := int((after.TotalAlloc - before.TotalAlloc))
	delta = (delta - 40000) / N
	t.Logf("bytes per read loop: %d", delta)
}
func (m cdbMap) Visit(visit func(NeedleValue) error) (err error) {
	fh, err := os.Open(m.fn1)
	if err != nil {
		return fmt.Errorf("cannot open %s: %s", m.fn1, err)
	}
	defer fh.Close()

	walk := func(elt cdb.Element) error {
		if len(elt.Key) != 8 {
			return nil
		}
		return visit(NeedleValue{
			Key:    Key(util.BytesToUint64(elt.Key)),
			Offset: util.BytesToUint32(elt.Data[:4]),
			Size:   util.BytesToUint32(elt.Data[4:8]),
		})
	}
	if err = cdb.DumpMap(fh, walk); err != nil {
		return err
	}
	// There is only one table.
	if m.c2 == nil {
		return nil
	}
	fh.Close()
	if fh, err = os.Open(m.fn2); err != nil {
		return fmt.Errorf("cannot open %s: %s", m.fn2, err)
	}
	return cdb.DumpMap(fh, walk)
}
/* ============================================================================================ */
func Copy(source string, dest string) bool {
	if FileType(source) == "dir" {
		fi, err := os.Stat(source)
		setErr(err)
		MkDir(dest, int(fi.Mode()), true)
		entries, err := ioutil.ReadDir(source)
		setErr(err)
		for _, entry := range entries {
			sfp := source + "/" + entry.Name()
			dfp := dest + "/" + entry.Name()
			Copy(sfp, dfp)
		}
	} else {
		sf, err := os.Open(source)
		setErr(err)
		defer sf.Close()
		df, err := os.Create(dest)
		setErr(err)
		defer df.Close()
		_, err = io.Copy(df, sf)
		if err == nil {
			// Preserve the source file's permissions on the copy.
			si, err := os.Stat(source)
			if err == nil {
				setErr(os.Chmod(dest, si.Mode()))
			}
		}
	}
	return true
}
// GetLogReader returns a reader for the specified filename. Any
// external requests (say from the admin UI via HTTP) must specify
// allowAbsolute as false to prevent leakage of non-log
// files. Absolute filenames are allowed for the case of the cockroach "log"
// command, which provides human readable output from an arbitrary file,
// and is intended to be run locally in a terminal.
func GetLogReader(filename string, allowAbsolute bool) (io.ReadCloser, error) {
	if filepath.IsAbs(filename) {
		if !allowAbsolute {
			return nil, fmt.Errorf("absolute pathnames are forbidden: %s", filename)
		}
		if verifyFile(filename) == nil {
			return os.Open(filename)
		}
	}
	// Verify there are no path separators in a non-absolute pathname.
	if filepath.Base(filename) != filename {
		return nil, fmt.Errorf("pathnames must be basenames only: %s", filename)
	}
	if !logFileRE.MatchString(filename) {
		return nil, fmt.Errorf("filename is not a cockroach log file: %s", filename)
	}
	filename = filepath.Join(*logDir, filename)
	if err := verifyFile(filename); err != nil {
		return nil, err
	}
	return os.Open(filename)
}
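// A hedged usage sketch (the handler and query parameter name are hypothetical):
// callers serving external HTTP requests pass allowAbsolute=false so only basenames
// matching logFileRE under *logDir can be read, while the local "log" command may pass true.
func exampleServeLogFile(w http.ResponseWriter, r *http.Request) {
	reader, err := GetLogReader(r.URL.Query().Get("file"), false /* allowAbsolute */)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	defer reader.Close()
	io.Copy(w, reader) // stream the log file to the client
}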
func main() {
	var num1 int
	var num2 float32
	var s string

	file1, _ := os.Open("hello1.txt") // open hello1.txt
	defer file1.Close()               // close the file just before main returns

	n, _ := fmt.Fscan(file1, &num1, &num2, &s) // read values separated by spaces and newlines
	fmt.Println("number of items:", n)         // number of items: 3
	fmt.Println(num1, num2, s)                 // 1 1.1 Hello

	file2, _ := os.Open("hello2.txt") // open hello2.txt
	defer file2.Close()               // close the file just before main returns

	n, _ = fmt.Fscanln(file2, &num1, &num2, &s) // read space-separated values from a single line
	fmt.Println("number of items:", n)          // number of items: 3
	fmt.Println(num1, num2, s)                  // 1 1.1 Hello

	file3, _ := os.Open("hello3.txt") // open hello3.txt
	defer file3.Close()               // close the file just before main returns

	n, _ = fmt.Fscanf(file3, "%d,%f,%s", &num1, &num2, &s) // read values using a format string
	fmt.Println("number of items:", n)                     // number of items: 3
	fmt.Println(num1, num2, s)                             // 1 1.1 Hello
}
// ParseKeyFile reads a DNSSEC key pair as generated by dnssec-keygen or other
// utilities: the public key from pubFile and the private key from privFile.
func ParseKeyFile(pubFile, privFile string) (*DNSKEY, error) {
	f, e := os.Open(pubFile)
	if e != nil {
		return nil, e
	}
	k, e := dns.ReadRR(f, pubFile)
	f.Close()
	if e != nil {
		return nil, e
	}
	f, e = os.Open(privFile)
	if e != nil {
		return nil, e
	}
	p, e := k.(*dns.DNSKEY).ReadPrivateKey(f, privFile)
	f.Close()
	if e != nil {
		return nil, e
	}
	if v, ok := p.(*rsa.PrivateKey); ok {
		return &DNSKEY{k.(*dns.DNSKEY), v, k.(*dns.DNSKEY).KeyTag()}, nil
	}
	if v, ok := p.(*ecdsa.PrivateKey); ok {
		return &DNSKEY{k.(*dns.DNSKEY), v, k.(*dns.DNSKEY).KeyTag()}, nil
	}
	return &DNSKEY{k.(*dns.DNSKEY), nil, 0}, errors.New("no known private key found")
}
// Compare the output of the C-based peg-markdown, which
// is, for each test, available in either a .html or a .mm file accompanying
// the .text file, with the output of this package's Markdown processor.
func compareOutput(w *bytes.Buffer, f Formatter, ext string, textPath string, p *Parser) (err error) {
	var bOrig bytes.Buffer

	r, err := os.Open(textPath)
	if err != nil {
		return
	}
	defer r.Close()

	w.Reset()
	p.Markdown(r, f)

	// replace .text extension by `ext'
	base := textPath[:len(textPath)-len(".text")]
	refPath := base + ext

	r, err = os.Open(refPath)
	if err != nil {
		return
	}
	defer r.Close()
	bOrig.ReadFrom(r)

	if bytes.Compare(bOrig.Bytes(), w.Bytes()) != 0 {
		err = fmt.Errorf("test %q failed", refPath)
	}
	return
}
// loadSpec loads the container specification from cPath and the runtime
// specification from rPath.
func (r *libcontainerRuntime) loadSpec(cPath, rPath string) (spec *specs.LinuxSpec, rspec *specs.LinuxRuntimeSpec, err error) {
	cf, err := os.Open(cPath)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, nil, fmt.Errorf("JSON specification file at %s not found", cPath)
		}
		return spec, rspec, err
	}
	defer cf.Close()

	rf, err := os.Open(rPath)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, nil, fmt.Errorf("JSON runtime config file at %s not found", rPath)
		}
		return spec, rspec, err
	}
	defer rf.Close()

	if err = json.NewDecoder(cf).Decode(&spec); err != nil {
		return spec, rspec, fmt.Errorf("unmarshal %s: %v", cPath, err)
	}
	if err = json.NewDecoder(rf).Decode(&rspec); err != nil {
		return spec, rspec, fmt.Errorf("unmarshal %s: %v", rPath, err)
	}
	return spec, rspec, r.checkSpecVersion(spec)
}
func ReadFromDisk() {
	// Load data/datafile.gob, if present.
	file, err := os.Open("data/datafile.gob")
	if err == nil {
		dec := gob.NewDecoder(file)
		err = dec.Decode(&data)
		file.Close()
		if err != nil {
			log.Fatal("Unable to decode:", err)
		}
	}
	// Then apply records from the data/datafile.log.NNN files in order.
	var packet WritePacket
	for ii := 0; ii < 128; ii++ {
		file, err := os.Open("data/datafile.log." + fmt.Sprintf("%03d", ii))
		if err != nil {
			break
		}
		dec := gob.NewDecoder(file)
		for dec.Decode(&packet) == nil {
			data[packet.Key] = packet.Val
			changeCount++
		}
		file.Close()
	}
}
func ReadImages(path string) (<-chan image.Image, error) {
	dir, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer dir.Close()
	names, err := dir.Readdirnames(0)
	if err != nil {
		return nil, err
	}
	resChan := make(chan image.Image)
	go func() {
		for _, name := range names {
			imagePath := filepath.Join(path, name)
			f, err := os.Open(imagePath)
			if err != nil {
				continue
			}
			// Files that fail to decode are silently skipped.
			img, _, _ := image.Decode(f)
			f.Close()
			if img != nil {
				resChan <- img
			}
		}
		close(resChan)
	}()
	return resChan, nil
}
func loadSpecConfig() (spec *specs.LinuxSpec, rspec *specs.LinuxRuntimeSpec, err error) {
	cPath := "config.json"
	cf, err := os.Open(cPath)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, nil, fmt.Errorf("config.json not found")
		}
		return nil, nil, err
	}
	defer cf.Close()

	rPath := "runtime.json"
	rf, err := os.Open(rPath)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, nil, fmt.Errorf("runtime.json not found")
		}
		return nil, nil, err
	}
	defer rf.Close()

	if err = json.NewDecoder(cf).Decode(&spec); err != nil {
		return
	}
	if err = json.NewDecoder(rf).Decode(&rspec); err != nil {
		return
	}
	return spec, rspec, nil
}
func copyDirToExportWriter(writer ExportWriter, inPath string, outPath string) *model.AppError {
	dir, err := os.Open(inPath)
	if err != nil {
		return model.NewAppError("copyDirToExportWriter", "Unable to open directory", err.Error())
	}
	defer dir.Close()

	fileInfoList, err := dir.Readdir(0)
	if err != nil {
		return model.NewAppError("copyDirToExportWriter", "Unable to read directory", err.Error())
	}

	for _, fileInfo := range fileInfoList {
		if fileInfo.IsDir() {
			if appErr := copyDirToExportWriter(writer, inPath+"/"+fileInfo.Name(), outPath+"/"+fileInfo.Name()); appErr != nil {
				return appErr
			}
		} else {
			if toFile, err := writer.Create(outPath + "/" + fileInfo.Name()); err != nil {
				return model.NewAppError("copyDirToExportWriter", "Unable to open file for export", err.Error())
			} else {
				fromFile, err := os.Open(inPath + "/" + fileInfo.Name())
				if err != nil {
					return model.NewAppError("copyDirToExportWriter", "Unable to open file", err.Error())
				}
				io.Copy(toFile, fromFile)
				fromFile.Close()
			}
		}
	}

	return nil
}
// ParseKeyFile reads a DNSSEC keyfile as generated by dnssec-keygen or other
// utilities. It adds ".key" for the public key and ".private" for the private key.
func ParseKeyFile(file string) (*dns.DNSKEY, crypto.Signer, error) {
	f, e := os.Open(file + ".key")
	if e != nil {
		return nil, nil, e
	}
	k, e := dns.ReadRR(f, file+".key")
	f.Close()
	if e != nil {
		return nil, nil, e
	}
	f, e = os.Open(file + ".private")
	if e != nil {
		return nil, nil, e
	}
	p, e := k.(*dns.DNSKEY).ReadPrivateKey(f, file+".private")
	f.Close()
	if e != nil {
		return nil, nil, e
	}
	if v, ok := p.(*rsa.PrivateKey); ok {
		return k.(*dns.DNSKEY), v, nil
	}
	if v, ok := p.(*ecdsa.PrivateKey); ok {
		return k.(*dns.DNSKEY), v, nil
	}
	return k.(*dns.DNSKEY), nil, nil
}
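// A hedged usage sketch (the key file base name is made up): ParseKeyFile is given the
// path without extension and derives "<base>.key" and "<base>.private" itself.
func exampleParseKeyFile() {
	key, signer, err := ParseKeyFile("Kexample.org.+013+12345")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(key.String(), signer != nil)
}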
func applyRepoPatches(chromiumSrcDir, runID string) error {
	// Apply Skia patch. Patches of 10 bytes or fewer are treated as empty placeholders.
	skiaDir := filepath.Join(chromiumSrcDir, "third_party", "skia")
	skiaPatch := filepath.Join(os.TempDir(), runID+".skia.patch")
	skiaPatchFile, err := os.Open(skiaPatch)
	if err != nil {
		return fmt.Errorf("Could not open Skia patch %s: %s", skiaPatch, err)
	}
	skiaPatchFileInfo, err := skiaPatchFile.Stat()
	skiaPatchFile.Close()
	if err != nil {
		return fmt.Errorf("Could not stat Skia patch %s: %s", skiaPatch, err)
	}
	if skiaPatchFileInfo.Size() > 10 {
		if err := ApplyPatch(skiaPatch, skiaDir); err != nil {
			return fmt.Errorf("Could not apply Skia's patch in %s: %s", skiaDir, err)
		}
	}
	// Apply Blink patch.
	blinkDir := filepath.Join(chromiumSrcDir, "third_party", "WebKit")
	blinkPatch := filepath.Join(os.TempDir(), runID+".blink.patch")
	blinkPatchFile, err := os.Open(blinkPatch)
	if err != nil {
		return fmt.Errorf("Could not open Blink patch %s: %s", blinkPatch, err)
	}
	blinkPatchFileInfo, err := blinkPatchFile.Stat()
	blinkPatchFile.Close()
	if err != nil {
		return fmt.Errorf("Could not stat Blink patch %s: %s", blinkPatch, err)
	}
	if blinkPatchFileInfo.Size() > 10 {
		if err := ApplyPatch(blinkPatch, blinkDir); err != nil {
			return fmt.Errorf("Could not apply Blink's patch in %s: %s", blinkDir, err)
		}
	}
	// Apply Chromium patch.
	chromiumPatch := filepath.Join(os.TempDir(), runID+".chromium.patch")
	chromiumPatchFile, err := os.Open(chromiumPatch)
	if err != nil {
		return fmt.Errorf("Could not open Chromium patch %s: %s", chromiumPatch, err)
	}
	chromiumPatchFileInfo, err := chromiumPatchFile.Stat()
	chromiumPatchFile.Close()
	if err != nil {
		return fmt.Errorf("Could not stat Chromium patch %s: %s", chromiumPatch, err)
	}
	if chromiumPatchFileInfo.Size() > 10 {
		if err := ApplyPatch(chromiumPatch, chromiumSrcDir); err != nil {
			return fmt.Errorf("Could not apply Chromium's patch in %s: %s", chromiumSrcDir, err)
		}
	}
	return nil
}