Example #1
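// writeAudioFrame buffers freshly generated samples in the muxer's audio FIFO and,
// once at least one full codec frame of samples is queued, drains the FIFO, encodes
// the samples, and writes the resulting packets to the output context.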
func (m *Muxer) writeAudioFrame(frame *C.AVFrame) bool {
	for C.av_audio_fifo_size(m.fifo) < 1024 { // generate & store in fifo
		C.fill_audio_frame(frame, m.audioStream.stream.codec)
		frame_size := frame.nb_samples
		if C.av_audio_fifo_realloc(m.fifo, C.av_audio_fifo_size(m.fifo)+frame_size) < 0 {
			return false
		}
		if C.av_audio_fifo_write(m.fifo, (*unsafe.Pointer)(unsafe.Pointer(&frame.data[0])), frame_size) < frame_size {
			return false
		}
	}
	got_packet := C.int(0)
	for C.av_audio_fifo_size(m.fifo) >= 1024 { // read & encode & write
		frame_size := C.min(C.av_audio_fifo_size(m.fifo), m.audioStream.stream.codec.frame_size)
		output_frame := C.alloc_audio_frame(m.audioStream.stream.codec)
		if C.av_audio_fifo_read(m.fifo, (*unsafe.Pointer)(unsafe.Pointer(&output_frame.data[0])), frame_size) < frame_size {
			C.av_frame_free(&output_frame)
			return false
		}
		pkt := C.AVPacket{}
		C.av_init_packet(&pkt)
		output_frame.pts = C.int64_t(m.audioStream.ts)
		m.audioStream.ts += int(m.audioStream.stream.codec.frame_size)
		if C.avcodec_encode_audio2(m.audioStream.stream.codec, &pkt, output_frame, &got_packet) < 0 {
			C.av_frame_free(&output_frame)
			C.av_free_packet(&pkt)
			return false
		}
		C.av_frame_free(&output_frame) // the encoder has consumed the FIFO samples
		if got_packet == 0 {
			continue
		}
		C.av_packet_rescale_ts(&pkt, m.audioStream.stream.codec.time_base, m.audioStream.stream.time_base)
		pkt.stream_index = m.audioStream.stream.index
		if C.av_interleaved_write_frame(m.context, &pkt) < 0 {
			return false
		}
	}
	return true
}
Example #2
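// routine interleaves the two elementary streams: on each pass it compares the video
// and audio timestamps and writes a frame for whichever stream is behind, until
// m.loop is cleared, then frees the working frames and signals completion.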
func (m *Muxer) routine() {
	vFrame := C.alloc_video_frame(m.videoStream.stream.codec)
	if vFrame == nil {
		m.done <- true
		return
	}
	aFrame := C.alloc_audio_frame(m.audioStream.stream.codec)
	if aFrame == nil {
		C.av_frame_free(&vFrame) // avoid leaking the video frame allocated above
		m.done <- true
		return
	}
	for m.loop {
		if C.av_compare_ts(C.int64_t(m.videoStream.ts), m.videoStream.stream.codec.time_base,
			C.int64_t(m.audioStream.ts), m.audioStream.stream.codec.time_base) <= 0 {
			m.writeVideoFrame(vFrame)
		} else {
			m.writeAudioFrame(aFrame)
		}
		time.Sleep(time.Millisecond * 30)
	}
	if vFrame != nil {
		C.av_frame_free(&vFrame)
	}
	if aFrame != nil {
		C.av_frame_free(&aFrame)
	}
	m.done <- true
}
Example #3
File: decoder.go Project: reusee/player
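// Close shuts down the decoder: it closes the per-stream codec contexts and the
// demuxer, frees any cached frames and buffers, closes the frame channel, and
// marks the decoder as stopped.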
func (self *Decoder) Close() {
	for _, stream := range self.Streams {
		C.avcodec_close(stream.codec)
	}
	// Close the demuxer only after the per-stream codec contexts it owns have been closed.
	C.avformat_close_input(&self.FormatContext)
	for _, frame := range self.frames {
		C.av_frame_free(&frame)
	}
	for _, buffer := range self.buffers {
		C.av_free(unsafe.Pointer(buffer))
	}
	close(self.frameChan)
	self.running = false
}
Example #4
File: avutil.go Project: codesuki/go-libav
func (f *Frame) Free() {
	if f.CAVFrame != nil {
		// av_frame_free releases the frame and resets f.CAVFrame to nil.
		C.av_frame_free(&f.CAVFrame)
	}
}
Example #5
File: frame.go Project: davidoram/gmf
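// Free releases the AVFrame wrapped by this Frame.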
func (this *Frame) Free() {
	C.av_frame_free(&this.avFrame)
}
Example #6
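// Close releases the capture's decoding resources: the reusable frame, the codec
// context, the demuxer context, and the swscale context.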
func (id *Capture) Close() {
	C.av_frame_free(&(id.frame))
	C.avcodec_close(id.codec)
	C.avformat_close_input(&(id.context))
	C.sws_freeContext(id.sws)
}
Example #7
// ImageWxH returns a screenshot at the ts milliseconds, scaled to the specified width and height.
func (g *Generator) ImageWxH(ts int64, width, height int) (image.Image, error) {
	img := image.NewRGBA(image.Rect(0, 0, width, height))
	frame := C.av_frame_alloc()
	defer C.av_frame_free(&frame)
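	// Rescale the millisecond timestamp into the video stream's time_base units for use as the seek target.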
	frameNum := C.av_rescale(
		C.int64_t(ts),
		C.int64_t(g.streams[g.vStreamIndex].time_base.den),
		C.int64_t(g.streams[g.vStreamIndex].time_base.num),
	) / 1000
	if C.avformat_seek_file(
		g.avfContext,
		C.int(g.vStreamIndex),
		0,
		frameNum,
		frameNum,
		C.AVSEEK_FLAG_FRAME,
	) < 0 {
		return nil, errors.New("can't seek to timestamp")
	}
	C.avcodec_flush_buffers(g.avcContext)
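	// Discard any frames the decoder buffered before the seek.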
	var pkt C.struct_AVPacket
	var frameFinished C.int
	for C.av_read_frame(g.avfContext, &pkt) == 0 {
		if int(pkt.stream_index) != g.vStreamIndex {
			C.av_free_packet(&pkt)
			continue
		}
		if C.avcodec_decode_video2(g.avcContext, frame, &frameFinished, &pkt) <= 0 {
			C.av_free_packet(&pkt)
			return nil, errors.New("can't decode frame")
		}
		C.av_free_packet(&pkt)
		if frameFinished == 0 || pkt.dts < frameNum {
			continue
		}
		ctx := C.sws_getContext(
			C.int(g.Width),
			C.int(g.Height),
			g.avcContext.pix_fmt,
			C.int(width),
			C.int(height),
			C.PIX_FMT_RGBA,
			C.SWS_BICUBIC,
			nil,
			nil,
			nil,
		)
		if ctx == nil {
			return nil, errors.New("can't allocate scaling context")
		}
		srcSlice := (**C.uint8_t)(&frame.data[0])
		srcStride := (*C.int)(&frame.linesize[0])
		dst := (**C.uint8_t)(unsafe.Pointer(&img.Pix))
		dstStride := (*C.int)(unsafe.Pointer(&[1]int{img.Stride}))
		C.sws_scale(
			ctx,
			srcSlice,
			srcStride,
			0,
			g.avcContext.height,
			dst,
			dstStride,
		)
		C.sws_freeContext(ctx)
	}
	return img, nil
}
Example #8
File: frame.go Project: stephenwithav/goav
//void av_frame_free (AVFrame **frame)
//Free the frame and any dynamically allocated objects in it, e.g. extended_data.
//If the frame is reference counted, it will be unreferenced first.
func Av_frame_free(f *AVFrame) {
	C.av_frame_free((**C.struct_AVFrame)(unsafe.Pointer(f)))
}
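Wrappers like Av_frame_free above (and AvFrameFree in the next example) ultimately pair av_frame_alloc with av_frame_free from libavutil. The following is a minimal, self-contained cgo sketch of that pairing; it is not taken from any of the projects listed here and assumes the FFmpeg development headers and a pkg-config entry for libavutil are available.

package main

/*
#cgo pkg-config: libavutil
#include <libavutil/frame.h>
*/
import "C"

func main() {
	frame := C.av_frame_alloc() // allocate an empty AVFrame (no data buffers yet)
	if frame == nil {
		return
	}
	// ... fill and use the frame ...
	C.av_frame_free(&frame) // frees the frame and its buffers, and sets frame to nil
}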
Example #9
File: frame.go Project: ovr/goav
//Free the frame and any dynamically allocated objects in it, e.g. extended_data.
//If the frame is reference counted, it will be unreferenced first.
func AvFrameFree(f *Frame) {
	C.av_frame_free((**C.struct_AVFrame)(unsafe.Pointer(&f)))
}