func (m *Muxer) writeAudioFrame(frame *C.AVFrame) bool {
	// generate samples until the FIFO holds at least one full encoder frame
	for C.av_audio_fifo_size(m.fifo) < 1024 {
		C.fill_audio_frame(frame, m.audioStream.stream.codec)
		frame_size := frame.nb_samples
		if C.av_audio_fifo_realloc(m.fifo, C.av_audio_fifo_size(m.fifo)+frame_size) < 0 {
			return false
		}
		if C.av_audio_fifo_write(m.fifo, (*unsafe.Pointer)(unsafe.Pointer(&frame.data[0])), frame_size) < frame_size {
			return false
		}
	}

	// read from the FIFO, encode and write
	got_packet := C.int(0)
	for C.av_audio_fifo_size(m.fifo) >= 1024 {
		frame_size := C.min(C.av_audio_fifo_size(m.fifo), m.audioStream.stream.codec.frame_size)
		output_frame := C.alloc_audio_frame(m.audioStream.stream.codec)
		if C.av_audio_fifo_read(m.fifo, (*unsafe.Pointer)(unsafe.Pointer(&output_frame.data[0])), frame_size) < frame_size {
			C.av_frame_free(&output_frame)
			return false
		}

		pkt := C.AVPacket{}
		C.av_init_packet(&pkt)
		output_frame.pts = C.int64_t(m.audioStream.ts)
		m.audioStream.ts += int(m.audioStream.stream.codec.frame_size)

		// encode the frame that was just read back from the FIFO
		ret := C.avcodec_encode_audio2(m.audioStream.stream.codec, &pkt, output_frame, &got_packet)
		C.av_frame_free(&output_frame)
		if ret < 0 {
			C.av_free_packet(&pkt)
			return false
		}
		if got_packet == 0 {
			continue
		}

		C.av_packet_rescale_ts(&pkt, m.audioStream.stream.codec.time_base, m.audioStream.stream.time_base)
		pkt.stream_index = m.audioStream.stream.index
		if C.av_interleaved_write_frame(m.context, &pkt) < 0 {
			return false
		}
	}
	return true
}
func (g *Generator) NextFrame() (image.Image, int64, error) {
	img := image.NewRGBA(image.Rect(0, 0, g.Width, g.Height))
	frame := C.av_frame_alloc()
	defer C.av_frame_free(&frame)

	var pkt C.struct_AVPacket
	var frameFinished C.int
	for C.av_read_frame(g.avfContext, &pkt) == 0 {
		if int(pkt.stream_index) != g.vStreamIndex {
			C.av_free_packet(&pkt)
			continue
		}
		if C.avcodec_decode_video2(g.avcContext, frame, &frameFinished, &pkt) <= 0 {
			C.av_free_packet(&pkt)
			return nil, 0, errors.New("can't decode frame")
		}
		C.av_free_packet(&pkt)
		if frameFinished == 0 {
			continue
		}

		// convert the decoded frame to RGBA directly into img.Pix
		ctx := C.sws_getContext(
			C.int(g.Width),
			C.int(g.Height),
			g.avcContext.pix_fmt,
			C.int(g.Width),
			C.int(g.Height),
			C.PIX_FMT_RGBA,
			C.SWS_BICUBIC,
			nil, nil, nil,
		)
		if ctx == nil {
			return nil, 0, errors.New("can't allocate scaling context")
		}
		srcSlice := (**C.uint8_t)(&frame.data[0])
		srcStride := (*C.int)(&frame.linesize[0])
		dst := (**C.uint8_t)(unsafe.Pointer(&img.Pix))
		dstStride := (*C.int)(unsafe.Pointer(&[1]int{img.Stride}))
		C.sws_scale(
			ctx,
			srcSlice,
			srcStride,
			C.int(0),
			g.avcContext.height,
			dst,
			dstStride,
		)
		C.sws_freeContext(ctx)
		break
	}

	timestamp := int64(C.av_frame_get_best_effort_timestamp(frame))
	return img, timestamp, nil
}
func (dpx *Demultiplexer) Start() {
	for {
		dpx.Packet = new(C.AVPacket)
		C.av_init_packet(dpx.Packet)
		if C.av_read_frame(dpx.Ds.Ctx, dpx.Packet) >= 0 {
			C.av_dup_packet(dpx.Packet)
		} else {
			println("end of file reached, closing channels")
			for i := 0; i < len(*dpx.tracks); i++ {
				println("closing channel", i)
				close((*dpx.tracks)[i].stream)
			}
			break
		}

		// copy the packet into a Go-owned Packet so it survives av_free_packet
		var re Packet
		re.Pts = int64(dpx.Packet.pts)
		re.Dts = int64(dpx.Packet.dts)
		re.Size = int(dpx.Packet.size)
		re.Data = make([]byte, re.Size)
		data := (*(*[1 << 30]byte)(unsafe.Pointer(dpx.Packet.data)))[0:dpx.Packet.size]
		copy(re.Data, data)
		re.Stream = int(dpx.Packet.stream_index)
		re.Flags = int(dpx.Packet.flags)
		re.Duration = int(dpx.Packet.duration)
		re.Pos = int64(dpx.Packet.pos)
		C.av_free_packet(dpx.Packet)

		(*dpx.tracks)[re.Stream].stream <- re
	}
}
func (id *Capture) Read(frame *C.AVFrame) error {
	if frame == nil {
		return fmt.Errorf("buffer error")
	}
	pkt := C.AVPacket{}
	C.av_init_packet(&pkt)
	defer C.av_free_packet(&pkt)

	if C.av_read_frame(id.context, &pkt) < 0 {
		return fmt.Errorf("read frame error")
	}
	if int(pkt.stream_index) != id.index {
		return fmt.Errorf("not video frame")
	}

	got_frame := C.int(0)
	if C.avcodec_decode_video2(id.codec, id.frame, &got_frame, &pkt) < 0 {
		return fmt.Errorf("decode frame error")
	}
	if got_frame != 0 {
		// scale the decoded frame into the caller-supplied frame
		if C.sws_scale(id.sws, (**C.uint8_t)(&id.frame.data[0]), &id.frame.linesize[0], 0,
			id.codec.height, &frame.data[0], &frame.linesize[0]) >= 0 {
			return nil
		}
		return fmt.Errorf("scale error")
	}
	return fmt.Errorf("no frame out")
}
func (m *H264Encoder) Encode(img *image.YCbCr) (out h264Out, err error) {
	var f *C.AVFrame
	if img == nil {
		// a nil image flushes frames the encoder has buffered
		f = nil
	} else {
		if img.SubsampleRatio != m.Pixfmt {
			err = errors.New("image pixfmt not match")
			return
		}
		if img.Rect.Dx() != m.W || img.Rect.Dy() != m.H {
			err = errors.New("image size not match")
			return
		}
		f = m.m.f
		f.data[0] = (*C.uint8_t)(unsafe.Pointer(&img.Y[0]))
		f.data[1] = (*C.uint8_t)(unsafe.Pointer(&img.Cb[0]))
		f.data[2] = (*C.uint8_t)(unsafe.Pointer(&img.Cr[0]))
		f.linesize[0] = C.int(img.YStride)
		f.linesize[1] = C.int(img.CStride)
		f.linesize[2] = C.int(img.CStride)
	}

	C.av_init_packet(&m.m.pkt)
	r := C.avcodec_encode_video2(m.m.ctx, &m.m.pkt, f, &m.m.got)
	defer C.av_free_packet(&m.m.pkt)
	if int(r) < 0 {
		err = errors.New("encode failed")
		return
	}
	if m.m.got == 0 {
		err = errors.New("no picture")
		return
	}
	if m.m.pkt.size == 0 {
		err = errors.New("packet size == 0")
		return
	}

	// copy the encoded bytes into Go-owned memory before the packet is freed
	out.Data = make([]byte, m.m.pkt.size)
	C.memcpy(
		unsafe.Pointer(&out.Data[0]),
		unsafe.Pointer(m.m.pkt.data),
		C.size_t(m.m.pkt.size),
	)
	out.Key = (m.m.pkt.flags & C.AV_PKT_FLAG_KEY) != 0
	return
}
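// Illustrative only (not from the original sources): a typical call pattern for
// Encode, assuming an initialized *H264Encoder `enc`, a slice of input images
// and a hypothetical sink. Passing nil at the end drains frames the encoder
// still has buffered.
//
//	for _, img := range frames { // frames: []*image.YCbCr
//		if out, err := enc.Encode(img); err == nil {
//			sink.Write(out.Data)
//		}
//	}
//	for {
//		out, err := enc.Encode(nil) // flush delayed frames
//		if err != nil {
//			break
//		}
//		sink.Write(out.Data)
//	}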
func (stream *Stream) decode() {
	for packet := range stream.packets {
		if packet == nil {
			// nil is the end-of-stream marker: close the output channel
			close(stream.Frames)
			return
		}
		stream.frame.Defaults()
		gotFrame, err := stream.decodeF(stream.cdcctx, stream.frame, packet)
		C.av_free_packet(packet)
		if err != nil {
			// ignore undecodable packets
			continue
		}
		if gotFrame {
			stream.Frames <- stream.frame
		}
	}
}
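// Illustrative only (not from the original sources): one way a caller might
// consume the Frames channel that decode() fills; render is hypothetical.
//
//	go stream.decode()
//	for frame := range stream.Frames {
//		render(frame)
//	}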
func (c *Decoder) decodeAudio(p Packet) *Frame {
	// rebuild an AVPacket from the Go-side Packet
	packet := new(C.AVPacket)
	C.av_init_packet(packet)
	defer C.av_free_packet(packet)
	packet.pts = C.int64_t(p.Pts)
	packet.dts = C.int64_t(p.Dts)
	packet.size = C.int(p.Size)
	packet.data = (*C.uint8_t)(unsafe.Pointer(&p.Data[0]))
	packet.stream_index = C.int(p.Stream)
	packet.flags = C.int(p.Flags)
	packet.duration = C.int(p.Duration)
	packet.pos = C.int64_t(p.Pos)

	// decode into a temporary buffer; the decoded samples are currently discarded
	samples_size := C.int(C.AVCODEC_MAX_AUDIO_FRAME_SIZE)
	outbuf := (*C.uint8_t)(C.av_malloc(C.uint(samples_size)))
	defer C.av_free(unsafe.Pointer(outbuf))
	C.avcodec_decode_audio3(c.Ctx, (*C.int16_t)(unsafe.Pointer(outbuf)), &samples_size, packet)

	return nil
}
func (m *Muxer) writeVideoFrame(frame *C.AVFrame) bool {
	if m.capture.Read(frame) != nil {
		return false
	}
	if m.display != nil {
		m.display.Render(frame)
	}

	pkt := C.AVPacket{}
	C.av_init_packet(&pkt)
	frame.pts = C.int64_t(m.videoStream.ts)
	m.videoStream.ts++

	got_packet := C.int(0)
	if C.avcodec_encode_video2(m.videoStream.stream.codec, &pkt, frame, &got_packet) < 0 {
		C.av_free_packet(&pkt)
		return false
	}
	if got_packet == 0 {
		return false
	}

	C.av_packet_rescale_ts(&pkt, m.videoStream.stream.codec.time_base, m.videoStream.stream.time_base)
	pkt.stream_index = m.videoStream.stream.index
	return C.av_interleaved_write_frame(m.context, &pkt) == 0
}
func (file *MediaFile) StartDecoding() {
	// one decoder goroutine per selected stream
	for _, i := range file.DecodedStreams {
		go file.Streams[i].decode()
	}

	// dispatch packets to the stream they belong to
	go func() {
	get_packets:
		for packet := range file.packets {
			if packet == nil {
				break get_packets
			}
			for _, i := range file.DecodedStreams {
				if packet.stream_index == C.int(i) {
					file.Streams[i].packets <- packet
					continue get_packets
				}
			}
			C.av_free_packet(packet)
		}
		// signal end of stream to every decoder
		for _, i := range file.DecodedStreams {
			file.Streams[i].packets <- nil
		}
	}()

	// read packets from the demuxer
	go func() {
		for {
			packet := new(C.AVPacket)
			C.av_init_packet(packet)
			if C.av_read_frame(file.fmtctx, packet) < 0 { // assume EOF
				file.packets <- nil
				return
			}
			file.packets <- packet
		}
	}()
}
func (dpx *Demultiplexer) ReadPacket(re *Packet) bool {
	dpx.Packet = new(C.AVPacket)
	C.av_init_packet(dpx.Packet)
	defer C.av_free_packet(dpx.Packet)

	if C.av_read_frame(dpx.Ds.Ctx, dpx.Packet) >= 0 {
		C.av_dup_packet(dpx.Packet)
	} else {
		return false
	}

	// copy the packet into the Go-owned Packet so it survives av_free_packet
	re.Pts = int64(dpx.Packet.pts)
	re.Dts = int64(dpx.Packet.dts)
	re.Size = int(dpx.Packet.size)
	re.Data = make([]byte, re.Size)
	data := (*(*[1 << 30]byte)(unsafe.Pointer(dpx.Packet.data)))[0:dpx.Packet.size]
	copy(re.Data, data)
	re.Stream = int(dpx.Packet.stream_index)
	re.Flags = int(dpx.Packet.flags)
	re.Duration = int(dpx.Packet.duration)
	re.Pos = int64(dpx.Packet.pos)
	return true
}
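// Illustrative only (not from the original sources): ReadPacket fits a simple
// drain loop; handle is hypothetical. Because the payload is copied into
// re.Data, the packet remains usable after ReadPacket has freed the AVPacket.
//
//	var pkt Packet
//	for dpx.ReadPacket(&pkt) {
//		handle(pkt)
//	}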
func av_free_packet2(p *avPacket) {
	if p != nil {
		C.av_free_packet((*C.AVPacket)(unsafe.Pointer(p)))
	}
}
func av_free_packet(p *Packet) {
	if p.avpacket != nil {
		C.av_free_packet(p.avpacket)
		p.avpacket = nil
	}
}
// Free a packet.
func (p *Packet) AvFreePacket() {
	C.av_free_packet((*C.struct_AVPacket)(p))
}
func (this *Packet) Free() {
	C.av_free_packet(&this.avPacket)
}
func main() {
	var (
		fmt_ctx          *C.AVFormatContext
		video_stream_idx C.int
		pkt              C.AVPacket
		fn               string
	)
	flag.StringVar(&fn, "i", fn, "Input filename")
	flag.Parse()
	if fn == "" {
		flag.PrintDefaults()
		os.Exit(1)
	}
	cfn := C.CString(fn)
	defer C.free(unsafe.Pointer(cfn))

	C.av_register_all()
	if err := C.avformat_open_input(&fmt_ctx, cfn, nil, nil); err < 0 {
		log.Fatalf("Could not open source file %s, %d\n", fn, err)
	}
	// The smd codecs aren't too happy with missing PTS
	fmt_ctx.flags |= C.AVFMT_FLAG_GENPTS
	defer C.avformat_close_input(&fmt_ctx)

	if err := C.avformat_find_stream_info(fmt_ctx, nil); err < 0 {
		log.Fatalf("Could not find stream information: %d", err)
	}
	if err := open_codec_context(&video_stream_idx, fmt_ctx, C.AVMEDIA_TYPE_VIDEO); err < 0 {
		log.Fatalf("Could not open codec context: %d", err)
	}
	log.Printf("fmt_ctx: %+v", fmt_ctx)

	streams := (*[32]*C.AVStream)(unsafe.Pointer(fmt_ctx.streams))
	log.Printf("video stream codec: %+v", streams[video_stream_idx].codec.codec_id)
	log.Printf("time_base: %+v", streams[video_stream_idx].time_base)
	num := 1000000 * float64(streams[video_stream_idx].time_base.num)
	den := float64(streams[video_stream_idx].time_base.den)

	var codec C.ismd_codec_type_t
	switch vc := streams[video_stream_idx].codec.codec_id; vc {
	case C.AV_CODEC_ID_H264:
		codec = C.ISMD_CODEC_TYPE_H264
	case C.AV_CODEC_ID_MPEG1VIDEO:
		fallthrough
	case C.AV_CODEC_ID_MPEG2VIDEO:
		codec = C.ISMD_CODEC_TYPE_MPEG2
	case C.AV_CODEC_ID_MPEG4:
		codec = C.ISMD_CODEC_TYPE_MPEG4
	default:
		log.Fatalf("Unhandled video codec: %d", vc)
	}
	Init(codec, C.GDL_PLANE_ID_UPP_C)
	defer Destroy()

	C.av_init_packet(&pkt)
	pkt.data = nil
	pkt.size = 0

	// stop the demux loop on any input from stdin
	running := true
	go func() {
		os.Stdin.Read(make([]byte, 1))
		running = false
	}()

	frame := 0
	for running && C.av_read_frame(fmt_ctx, &pkt) >= 0 {
		orig_pkt := pkt
		wrote := false
		for pkt.stream_index == video_stream_idx && pkt.size > 0 {
			pts := num * float64(pkt.pts) / den
			WriteToInputPort(uintptr(unsafe.Pointer(pkt.data)), C.size_t(pkt.size), pts, 32*1024)
			wrote = true
			break
		}
		if wrote {
			frame++
			if frame%100 == 0 {
				var stat C.ismd_vidrend_stats_t
				C.ismd_vidrend_get_stats(m_video_render, &stat)
				log.Printf("%+v", stat)
			}
		}
		C.av_free_packet(&orig_pkt)
	}
}
// Free a packet.
//
//	void av_free_packet(AVPacket *pkt)
func Av_free_packet(p *AVPacket) {
	C.av_free_packet((*C.struct_AVPacket)(p))
}
func (self *Decoder) Start(videoStream, audioStream *C.AVStream, scaleWidth, scaleHeight C.int) *Decoder {
	self.running = true
	self.duration = time.Duration(self.FormatContext.duration * C.AV_TIME_BASE / 1000)
	vCodecCtx := videoStream.codec
	aCodecCtx := audioStream.codec
	self.durationPerSample = time.Second / time.Duration(aCodecCtx.sample_rate)

	// frame pool
	poolSize := 16
	pool := make(chan *C.AVFrame, poolSize)
	self.pool = pool
	numBytes := C.size_t(C.avpicture_get_size(C.PIX_FMT_YUV420P, scaleWidth, scaleHeight))
	for i := 0; i < poolSize; i++ {
		frame := C.av_frame_alloc()
		self.frames = append(self.frames, frame)
		buffer := (*C.uint8_t)(unsafe.Pointer(C.av_malloc(numBytes)))
		self.buffers = append(self.buffers, buffer)
		C.avpicture_fill((*C.AVPicture)(unsafe.Pointer(frame)), buffer, C.PIX_FMT_YUV420P, scaleWidth, scaleHeight)
		pool <- frame
	}

	// decode
	self.frameChan = make(chan *C.AVFrame, 512)
	go func() {
		runtime.LockOSThread()

		// scale context
		scaleContext := C.sws_getCachedContext(nil, vCodecCtx.width, vCodecCtx.height, vCodecCtx.pix_fmt,
			scaleWidth, scaleHeight, C.PIX_FMT_YUV420P, C.SWS_LANCZOS, nil, nil, nil)
		if scaleContext == nil {
			log.Fatal("get scale context failed")
		}

		// resample context
		resampleContext := C.swr_alloc_set_opts(nil,
			C.AV_CH_LAYOUT_STEREO, C.AV_SAMPLE_FMT_FLT, aCodecCtx.sample_rate,
			C.int64_t(aCodecCtx.channel_layout), aCodecCtx.sample_fmt, aCodecCtx.sample_rate,
			0, nil)
		if resampleContext == nil {
			log.Fatal("get resample context failed")
		}
		C.swr_init(resampleContext)

		var packet C.AVPacket
		var frameFinished C.int
		var pts int64
		var packetTime time.Duration
		vFrame := C.av_frame_alloc()
		aFrame := C.av_frame_alloc()
		videoIndex := videoStream.index
		audioIndex := audioStream.index
		resampleBuffer := (*C.uint8_t)(C.av_malloc(4096 * 8))
		resampleBufferp := &resampleBuffer
		self.Timer = NewTimer()

		// decode loop
		for self.running {
			// seek
			if self.seekTarget > 0 {
				if C.av_seek_frame(self.FormatContext, -1,
					C.int64_t(float64(self.seekNext)/float64(time.Second)*float64(C.AV_TIME_BASE)),
					C.AVSEEK_FLAG_BACKWARD) < 0 {
					log.Fatal("seek error")
				}
				for _, codecCtx := range self.openedCodecs {
					C.avcodec_flush_buffers(codecCtx)
				}
				p("frame seek done\n")
			}

		read_packet:
			// read packet
			C.av_free_packet(&packet)
			if C.av_read_frame(self.FormatContext, &packet) < 0 {
				log.Fatal("read frame error") // TODO: stop gracefully
			}

			// get packet time
			if packet.dts != C.AV_NOPTS_VALUE {
				pts = int64(packet.dts)
			} else {
				pts = 0
			}
			if packet.stream_index == videoIndex {
				packetTime = time.Duration(float64(pts) * float64(C.av_q2d(videoStream.time_base)) * float64(time.Second))
			} else if packet.stream_index == audioIndex {
				packetTime = time.Duration(float64(pts) * float64(C.av_q2d(audioStream.time_base)) * float64(time.Second))
			} else {
				// ignore packet
				goto read_packet
			}
			p("packet time %v at timer time %v\n", packetTime, self.Timer.Now())

			// check seek
			if self.seekTarget > 0 && packetTime > 0 { // if the packet time cannot be determined, skip
				if packetTime < self.seekTarget { // seek again
					self.seekNext += self.seekStep
					p("seek again %v\n", self.seekNext)
				} else { // seek ok
					p("seek ok\n")
					self.seekTarget = 0
					self.Timer.Jump(packetTime - self.Timer.Now())
				}
			}

			// decode
			if packet.stream_index == videoIndex { // decode video
				if C.avcodec_decode_video2(vCodecCtx, vFrame, &frameFinished, &packet) < 0 {
					continue // bad packet
				}
				if frameFinished <= 0 {
					goto read_packet // frame not complete
				}
				bufFrame := <-pool // get frame buffer
				C.sws_scale(scaleContext, // scale
					&vFrame.data[0], &vFrame.linesize[0], 0, vCodecCtx.height,
					&bufFrame.data[0], &bufFrame.linesize[0])
				bufFrame.pts = C.int64_t(packetTime) // set packet time
				self.frameChan <- bufFrame           // push to queue
				p("video frame %v\n", packetTime)
			} else if packet.stream_index == audioIndex { // decode audio
			decode_audio_packet:
				l := C.avcodec_decode_audio4(aCodecCtx, aFrame, &frameFinished, &packet)
				if l < 0 {
					continue // bad packet
				}
				if frameFinished <= 0 {
					goto read_packet // frame not complete
				}
				if frameFinished > 0 { // frame finished
					n := C.swr_convert(resampleContext, resampleBufferp, 4096, aFrame.extended_data, aFrame.nb_samples)
					if n != aFrame.nb_samples {
						log.Fatal("audio resample failed")
					}
					self.audioFrames <- &AudioFrame{
						time: packetTime,
						data: C.GoBytes(unsafe.Pointer(resampleBuffer), n*8),
					}
				}
				if l != packet.size { // multiple frames in one packet
					packet.size -= l
					packet.data = (*C.uint8_t)(unsafe.Pointer(uintptr(unsafe.Pointer(packet.data)) + uintptr(l)))
					goto decode_audio_packet
				}
				p("audio frame %v\n", packetTime)
			} else {
				// other stream
				goto read_packet
			}
		}
	}()

	// sync video
	go func() {
		maxDelta := time.Second / time.Duration(C.av_q2d(videoStream.r_frame_rate)/10)
		for frame := range self.frameChan {
			delta := time.Duration(frame.pts) - self.Timer.Now()
			p("video frame %v, delta %v, max delta %v\n", time.Duration(frame.pts), delta, maxDelta)
			if delta > 0 {
				if delta > maxDelta {
					self.Timer.Jump(delta)
					print("timer jumped\n")
				} else {
					time.Sleep(delta)
				}
			} else if delta < 0 {
				// drop frame
				self.RecycleFrame(frame)
				continue
			}
			self.timedFrames <- frame
		}
	}()

	return self
}
// ImageWxH returns a screenshot at ts milliseconds, scaled to the specified width and height.
func (g *Generator) ImageWxH(ts int64, width, height int) (image.Image, error) {
	img := image.NewRGBA(image.Rect(0, 0, width, height))
	frame := C.av_frame_alloc()
	defer C.av_frame_free(&frame)

	// convert the millisecond timestamp into the stream's time base and seek there
	frameNum := C.av_rescale(
		C.int64_t(ts),
		C.int64_t(g.streams[g.vStreamIndex].time_base.den),
		C.int64_t(g.streams[g.vStreamIndex].time_base.num),
	) / 1000
	if C.avformat_seek_file(
		g.avfContext,
		C.int(g.vStreamIndex),
		0,
		frameNum,
		frameNum,
		C.AVSEEK_FLAG_FRAME,
	) < 0 {
		return nil, errors.New("can't seek to timestamp")
	}
	C.avcodec_flush_buffers(g.avcContext)

	var pkt C.struct_AVPacket
	var frameFinished C.int
	for C.av_read_frame(g.avfContext, &pkt) == 0 {
		if int(pkt.stream_index) != g.vStreamIndex {
			C.av_free_packet(&pkt)
			continue
		}
		if C.avcodec_decode_video2(g.avcContext, frame, &frameFinished, &pkt) <= 0 {
			C.av_free_packet(&pkt)
			return nil, errors.New("can't decode frame")
		}
		C.av_free_packet(&pkt)
		if frameFinished == 0 || pkt.dts < frameNum {
			continue
		}

		// scale the decoded frame to the requested size, writing RGBA into img.Pix
		ctx := C.sws_getContext(
			C.int(g.Width),
			C.int(g.Height),
			g.avcContext.pix_fmt,
			C.int(width),
			C.int(height),
			C.PIX_FMT_RGBA,
			C.SWS_BICUBIC,
			nil, nil, nil,
		)
		if ctx == nil {
			return nil, errors.New("can't allocate scaling context")
		}
		srcSlice := (**C.uint8_t)(&frame.data[0])
		srcStride := (*C.int)(&frame.linesize[0])
		dst := (**C.uint8_t)(unsafe.Pointer(&img.Pix))
		dstStride := (*C.int)(unsafe.Pointer(&[1]int{img.Stride}))
		C.sws_scale(
			ctx,
			srcSlice,
			srcStride,
			0,
			g.avcContext.height,
			dst,
			dstStride,
		)
		C.sws_freeContext(ctx)
		break
	}
	return img, nil
}
func (c *Decoder) decodeVideo(p Packet) *Frame {
	if !c.Valid {
		// refuse to decode with an invalid Decoder
		return nil
	}

	// rebuild an AVPacket from the Go-side Packet
	packet := new(C.AVPacket)
	C.av_init_packet(packet)
	defer C.av_free_packet(packet)
	packet.pts = C.int64_t(p.Pts)
	packet.dts = C.int64_t(p.Dts)
	packet.size = C.int(p.Size)
	packet.data = (*C.uint8_t)(unsafe.Pointer(&p.Data[0]))
	packet.stream_index = C.int(p.Stream)
	packet.flags = C.int(p.Flags)
	packet.duration = C.int(p.Duration)
	packet.pos = C.int64_t(p.Pos)

	height := c.Ctx.height
	width := c.Ctx.width
	frame := new(Frame)

	// allocate a picture buffer and attach it to the frame
	numBytes := C.avpicture_get_size(0, width, height)
	if numBytes > 0 {
		buffer := make([]byte, numBytes)
		C.avpicture_fill((*C.AVPicture)(unsafe.Pointer(frame)), (*C.uint8_t)(unsafe.Pointer(&buffer[0])), 0, width, height)
	}

	// the decode call itself is still disabled:
	// frameFinished := new(C.int)
	// bytes_decoded := C.avcodec_decode_video2(c.Ctx, (*C.AVFrame)(unsafe.Pointer(frame)), frameFinished, packet)

	frame.pts = packet.pts
	return frame
}