コード例 #1
0
ファイル: capture.go プロジェクト: qianbo0423/media-muxer
// Read pulls the next packet from the capture's format context, decodes it
// with the capture's video codec, and scales the decoded picture into the
// caller-supplied frame.
//
// It returns an error when frame is nil, when reading or decoding fails,
// when the packet belongs to a different stream than the selected video
// stream, or when scaling fails. On success it returns nil.
func (id *Capture) Read(frame *C.AVFrame) error {
	// A typed pointer compares against nil directly; the original
	// (*C.AVFrame)(null) conversion was redundant.
	if frame == nil {
		return fmt.Errorf("buffer error")
	}
	pkt := C.AVPacket{}
	C.av_init_packet(&pkt)
	// Release the packet's payload on every return path.
	defer C.av_free_packet(&pkt)
	if C.av_read_frame(id.context, &pkt) < 0 {
		return fmt.Errorf("read frame error")
	}
	if int(pkt.stream_index) != id.index {
		return fmt.Errorf("not video frame")
	}
	gotFrame := C.int(0)
	if C.avcodec_decode_video2(id.codec, id.frame, &gotFrame, &pkt) < 0 {
		return fmt.Errorf("decode frame error")
	}
	// The decoder may consume a packet without emitting a picture
	// (e.g. codec delay); report that as "no frame out".
	if gotFrame == 0 {
		return fmt.Errorf("no frame out")
	}
	// Convert/scale the decoded picture into the destination frame.
	if C.sws_scale(id.sws, (**C.uint8_t)(&(id.frame.data[0])), &id.frame.linesize[0], 0, id.codec.height, &frame.data[0], &frame.linesize[0]) < 0 {
		return fmt.Errorf("scale error")
	}
	return nil
}
コード例 #2
0
ファイル: swscale.go プロジェクト: gale320/goav
// SwsScale scales the image slice in src and writes the scaled result into d.
// str and ds are the strides (bytes per row) of the source and destination
// planes, y is the first source row to process and h the number of rows.
// It returns libswscale's result: the height of the output slice.
func SwsScale(ctxt *Context, src *uint8, str int, y, h int, d *uint8, ds int) int {
	cctxt := (*C.struct_SwsContext)(unsafe.Pointer(ctxt))
	csrc := (*C.uint8_t)(unsafe.Pointer(src))
	cd := (*C.uint8_t)(unsafe.Pointer(d))
	// Convert the stride arguments to C ints by value. The previous code
	// cast *int to *C.int via unsafe.Pointer, reinterpreting a (usually
	// 64-bit) Go int's memory as a 32-bit C int — that only happened to
	// work on little-endian platforms and is broken on big-endian ones.
	cstr := C.int(str)
	cds := C.int(ds)
	return int(C.sws_scale(cctxt, &csrc, &cstr, C.int(y), C.int(h), &cd, &cds))
}
コード例 #3
0
ファイル: sws.go プロジェクト: Dim0N22/gmf
// Scale rescales the picture held in src into dst using this context's
// configured conversion (dimensions and pixel formats fixed at context
// creation). Processing covers all of src's rows, starting at row 0.
func (this *SwsCtx) Scale(src *Frame, dst *Frame) {
	srcPlanes := (**C.uint8_t)(unsafe.Pointer(&src.avFrame.data))
	srcStrides := (*_Ctype_int)(unsafe.Pointer(&src.avFrame.linesize))
	dstPlanes := (**C.uint8_t)(unsafe.Pointer(&dst.avFrame.data))
	dstStrides := (*_Ctype_int)(unsafe.Pointer(&dst.avFrame.linesize))
	C.sws_scale(this.swsCtx, srcPlanes, srcStrides, 0, C.int(src.Height()), dstPlanes, dstStrides)
}
コード例 #4
0
ファイル: screengen.go プロジェクト: reaperhulk/screengen
// NextFrame decodes the next frame of the generator's video stream, scales
// it to (g.Width, g.Height) RGBA, and returns the image together with the
// frame's best-effort timestamp.
//
// Non-video packets are skipped; a decode failure aborts with an error.
func (g *Generator) NextFrame() (image.Image, int64, error) {
	img := image.NewRGBA(image.Rect(0, 0, g.Width, g.Height))
	frame := C.av_frame_alloc()
	// The allocated AVFrame was previously leaked on every call; free it
	// once the timestamp has been read (defers run after the return
	// values are evaluated).
	defer C.av_frame_free(&frame)
	var pkt C.struct_AVPacket
	var frameFinished C.int
	for C.av_read_frame(g.avfContext, &pkt) == 0 {
		if int(pkt.stream_index) != g.vStreamIndex {
			C.av_free_packet(&pkt)
			continue
		}
		if C.avcodec_decode_video2(g.avcContext, frame, &frameFinished, &pkt) <= 0 {
			C.av_free_packet(&pkt)
			return nil, 0, errors.New("can't decode frame")
		}
		C.av_free_packet(&pkt)
		// The decoder may need several packets before producing a
		// complete picture.
		if frameFinished == 0 {
			continue
		}
		// NOTE(review): the source dimensions passed here are g.Width/
		// g.Height rather than the codec's width/height — confirm they
		// always match the decoded frame.
		ctx := C.sws_getContext(
			C.int(g.Width),
			C.int(g.Height),
			g.avcContext.pix_fmt,
			C.int(g.Width),
			C.int(g.Height),
			C.PIX_FMT_RGBA,
			C.SWS_BICUBIC,
			nil,
			nil,
			nil,
		)
		if ctx == nil {
			return nil, 0, errors.New("can't allocate scaling context")
		}
		srcSlice := (**C.uint8_t)(&frame.data[0])
		srcStride := (*C.int)(&frame.linesize[0])
		// img.Pix's backing array starts with the pixel data pointer,
		// so &img.Pix doubles as a one-element plane-pointer array.
		dst := (**C.uint8_t)(unsafe.Pointer(&img.Pix))
		dstStride := (*C.int)(unsafe.Pointer(&[1]int{img.Stride}))
		C.sws_scale(
			ctx,
			srcSlice,
			srcStride,
			C.int(0),
			g.avcContext.height,
			dst,
			dstStride,
		)
		// Free the scaler; it was previously leaked on every frame.
		C.sws_freeContext(ctx)
		break
	}
	timestamp := int64(C.av_frame_get_best_effort_timestamp(frame))
	return img, timestamp, nil
}
コード例 #5
0
ファイル: swscale.go プロジェクト: psychobob666/gmf
// sws_scale rescales the picture held in src into trg using ctx and
// returns libswscale's result (the height of the output slice).
func sws_scale(ctx *SwsContext, src *Frame, trg *Frame) int {
	srcPlanes := (**C.uint8_t)(unsafe.Pointer(&src.avframe.data))
	srcStrides := (*_Ctype_int)(unsafe.Pointer(&src.avframe.linesize))
	dstPlanes := (**C.uint8_t)(unsafe.Pointer(&trg.avframe.data))
	dstStrides := (*_Ctype_int)(unsafe.Pointer(&trg.avframe.linesize))
	return int(C.sws_scale(ctx.sws, srcPlanes, srcStrides, 0, C.int(src.height), dstPlanes, dstStrides))
}
コード例 #6
0
ファイル: sws.go プロジェクト: flexconstructor/gmf
// Scale rescales the picture held in src into dst using this context's
// configured conversion, logging (and swallowing) any runtime panic.
//
// The recover guard must be installed BEFORE the C call: in the original
// code the defer was registered after sws_scale had already returned, so
// it could never recover anything.
func (this *SwsCtx) Scale(src *Frame, dst *Frame) {
	defer func() {
		if r := recover(); r != nil {
			buf := make([]byte, 1<<16)
			// runtime.Stack reports how many bytes it wrote; only
			// that prefix of the buffer is meaningful.
			n := runtime.Stack(buf, false)
			reason := fmt.Sprintf("%v: %s", r, buf[:n])
			// Printf, not Println: Println does not interpret verbs.
			fmt.Printf("Runtime failure, reason -> %s\n", reason)
		}
	}()
	C.sws_scale(
		this.swsCtx,
		(**C.uint8_t)(unsafe.Pointer(&src.avFrame.data)),
		(*_Ctype_int)(unsafe.Pointer(&src.avFrame.linesize)),
		0,
		C.int(src.Height()),
		(**C.uint8_t)(unsafe.Pointer(&dst.avFrame.data)),
		(*_Ctype_int)(unsafe.Pointer(&dst.avFrame.linesize)))
}
コード例 #7
0
ファイル: ffmpeg.go プロジェクト: stephenwithav/ffmpeg
// WriteFrame scales the encoder's current input image (e.im) into the
// encoder frame, encodes it, and writes the compressed bytes to e.Output.
//
// It returns an error for unsupported image types, encoder failures, and
// short or failed writes. A zero-sized encoder result means the frame was
// buffered (delayed output) and is not an error.
func (e *Encoder) WriteFrame() error {
	e._frame.pts = C.int64_t(e._context.frame_number)

	var input_data [3]*C.uint8_t
	var input_linesize [3]C.int

	// Both supported formats are packed 4-bytes-per-pixel layouts.
	const bpp = 4
	switch im := e.im.(type) {
	case *image.RGBA:
		input_data = [3]*C.uint8_t{ptr(im.Pix)}
		input_linesize = [3]C.int{C.int(e.im.Bounds().Dx() * bpp)}
	case *image.NRGBA:
		input_data = [3]*C.uint8_t{ptr(im.Pix)}
		input_linesize = [3]C.int{C.int(e.im.Bounds().Dx() * bpp)}
	default:
		// Return an error instead of panicking: this method already
		// reports failures through its error result.
		return fmt.Errorf("unknown input image type %T", e.im)
	}

	// Perform scaling from input type to output type
	C.sws_scale(e._swscontext, &input_data[0], &input_linesize[0],
		0, e._context.height,
		&e._frame.data[0], &e._frame.linesize[0])

	outsize := C.avcodec_encode_video(e._context, ptr(e._outbuf),
		C.int(len(e._outbuf)), e._frame)

	// A negative return is an encoder error; the original code fell
	// through and sliced e._outbuf with a negative bound, panicking.
	if outsize < 0 {
		return fmt.Errorf("encode error: %d", outsize)
	}
	if outsize == 0 {
		// Encoder buffered the frame; nothing to write yet.
		return nil
	}

	n, err := e.Output.Write(e._outbuf[:outsize])
	if err != nil {
		return err
	}
	if n < int(outsize) {
		return fmt.Errorf("Short write, expected %d, wrote %d", outsize, n)
	}

	return nil
}
コード例 #8
0
ファイル: swscale.go プロジェクト: ieee0824/go-thumber
// Scale rescales src into a freshly allocated YUVImage according to opts
// (destination size and swscale filter). Output is YUV444 (or grayscale
// when the source is grayscale). It returns an error only when libswscale
// fails to allocate a scaling context.
func Scale(src *jpeg.YUVImage, opts ScaleOptions) (*jpeg.YUVImage, error) {
	// Figure out what format we're dealing with
	var srcFmt, dstFmt int32
	var flags C.int
	flags = C.SWS_FULL_CHR_H_INT | C.int(opts.Filter) | C.SWS_ACCURATE_RND
	components := 3
	var dst jpeg.YUVImage
	dstFmt = C.PIX_FMT_YUV444P
	dst.Format = jpeg.YUV444
	// Map the source subsampling to the matching libav pixel format.
	// Grayscale is special: the output stays grayscale and only one
	// plane is processed.
	switch src.Format {
	case jpeg.YUV444:
		srcFmt = C.PIX_FMT_YUV444P
		flags |= C.SWS_FULL_CHR_H_INP
	case jpeg.YUV422:
		srcFmt = C.PIX_FMT_YUV422P
	case jpeg.YUV440:
		srcFmt = C.PIX_FMT_YUV440P
	case jpeg.YUV420:
		srcFmt = C.PIX_FMT_YUV420P
	case jpeg.Grayscale:
		srcFmt = C.PIX_FMT_GRAY8
		dstFmt = C.PIX_FMT_GRAY8
		components = 1
		dst.Format = jpeg.Grayscale
	}

	// swscale can't handle images smaller than this; pad them
	// Doubling both widths by the same factor keeps the scale ratio
	// intact; padFactor records how much horizontal padding to add.
	paddedDstWidth := opts.DstWidth
	paddedSrcWidth := src.Width
	padFactor := 1
	for paddedDstWidth < 8 || paddedSrcWidth < 4 {
		paddedDstWidth *= 2
		paddedSrcWidth *= 2
		padFactor *= 2
	}

	// Get the SWS context
	sws := C.sws_getContext(C.int(paddedSrcWidth), C.int(src.Height), srcFmt,
		C.int(paddedDstWidth), C.int(opts.DstHeight), dstFmt,
		flags, nil, nil, nil)

	if sws == nil {
		return nil, errors.New("sws_getContext failed")
	}

	defer C.sws_freeContext(sws)

	// We only need 3 planes, but libswscale is stupid and checks the alignment
	// of all 4 pointers... better give it a dummy one.
	var srcYUVPtr [4](*uint8)
	var dstYUVPtr [4](*uint8)
	var srcStrides [4](C.int)
	var dstStrides [4](C.int)

	dst.Width = opts.DstWidth
	dst.Height = opts.DstHeight
	// Strides and plane heights are rounded up to jpeg.AlignSize so the
	// JPEG encoder downstream gets aligned planes.
	dstStride := pad(paddedDstWidth, jpeg.AlignSize)
	dstFinalPaddedWidth := pad(opts.DstWidth, jpeg.AlignSize)
	dstPaddedHeight := pad(opts.DstHeight, jpeg.AlignSize)
	// Allocate image planes and pointers
	for i := 0; i < components; i++ {
		dst.Stride[i] = dstStride
		dst.Data[i] = make([]byte, dstStride*dstPaddedHeight)
		dstYUVPtr[i] = (*uint8)(unsafe.Pointer(&dst.Data[i][0]))
		dstStrides[i] = C.int(dstStride)
		// apply horizontal padding if image is too small
		if padFactor > 1 {
			// Copy each source row into a wider buffer and replicate
			// the last pixel of the row across the padded region.
			planeWidth := src.PlaneWidth(i)
			paddedWidth := planeWidth * padFactor
			planeHeight := src.PlaneHeight(i)
			paddedStride := pad(paddedWidth, jpeg.AlignSize)
			newData := make([]uint8, paddedStride*planeHeight)
			for y := 0; y < planeHeight; y++ {
				copy(newData[y*paddedStride:], src.Data[i][y*src.Stride[i]:y*src.Stride[i]+planeWidth])
				pixel := src.Data[i][y*src.Stride[i]+planeWidth-1]
				for x := planeWidth; x < paddedWidth; x++ {
					newData[y*paddedStride+x] = pixel
				}
			}
			srcStrides[i] = C.int(paddedStride)
			srcYUVPtr[i] = &newData[0]
		} else {
			srcStrides[i] = C.int(src.Stride[i])
			srcYUVPtr[i] = (*uint8)(unsafe.Pointer(&src.Data[i][0]))
		}
	}

	C.sws_scale(sws, (**C.uint8_t)(unsafe.Pointer(&srcYUVPtr[0])), &srcStrides[0], 0, C.int(src.Height),
		(**C.uint8_t)(unsafe.Pointer(&dstYUVPtr[0])), &dstStrides[0])

	// Replicate the last column and row of pixels as padding, which is typical
	// behavior prior to JPEG compression
	for i := 0; i < components; i++ {
		for y := 0; y < dst.Height; y++ {
			pixel := dst.Data[i][y*dstStride+dst.Width-1]
			for x := dst.Width; x < dstFinalPaddedWidth; x++ {
				dst.Data[i][y*dstStride+x] = pixel
			}
		}
		lastRow := dst.Data[i][dstStride*(dst.Height-1) : dstStride*dst.Height]
		for y := dst.Height; y < dstPaddedHeight; y++ {
			copy(dst.Data[i][y*dstStride:], lastRow)
		}
	}

	return &dst, nil
}
コード例 #9
0
// ImageWxH returns a screenshot at the ts milliseconds, scaled to the specified width and height.
func (g *Generator) ImageWxH(ts int64, width, height int) (image.Image, error) {
	img := image.NewRGBA(image.Rect(0, 0, width, height))
	frame := C.av_frame_alloc()
	defer C.av_frame_free(&frame)
	// Convert ts (milliseconds) into the video stream's time base.
	frameNum := C.av_rescale(
		C.int64_t(ts),
		C.int64_t(g.streams[g.vStreamIndex].time_base.den),
		C.int64_t(g.streams[g.vStreamIndex].time_base.num),
	) / 1000
	if C.avformat_seek_file(
		g.avfContext,
		C.int(g.vStreamIndex),
		0,
		frameNum,
		frameNum,
		C.AVSEEK_FLAG_FRAME,
	) < 0 {
		return nil, errors.New("can't seek to timestamp")
	}
	// Drop any decoder state left over from before the seek.
	C.avcodec_flush_buffers(g.avcContext)
	var pkt C.struct_AVPacket
	var frameFinished C.int
	for C.av_read_frame(g.avfContext, &pkt) == 0 {
		if int(pkt.stream_index) != g.vStreamIndex {
			C.av_free_packet(&pkt)
			continue
		}
		if C.avcodec_decode_video2(g.avcContext, frame, &frameFinished, &pkt) <= 0 {
			C.av_free_packet(&pkt)
			return nil, errors.New("can't decode frame")
		}
		// Capture the DTS before releasing the packet: the original
		// code read pkt.dts AFTER av_free_packet, i.e. from a packet
		// whose fields may already have been reset.
		dts := pkt.dts
		C.av_free_packet(&pkt)
		// Keep decoding until we have a complete picture at or past
		// the requested timestamp.
		if frameFinished == 0 || dts < frameNum {
			continue
		}
		ctx := C.sws_getContext(
			C.int(g.Width),
			C.int(g.Height),
			g.avcContext.pix_fmt,
			C.int(width),
			C.int(height),
			C.PIX_FMT_RGBA,
			C.SWS_BICUBIC,
			nil,
			nil,
			nil,
		)
		if ctx == nil {
			return nil, errors.New("can't allocate scaling context")
		}
		srcSlice := (**C.uint8_t)(&frame.data[0])
		srcStride := (*C.int)(&frame.linesize[0])
		dst := (**C.uint8_t)(unsafe.Pointer(&img.Pix))
		dstStride := (*C.int)(unsafe.Pointer(&[1]int{img.Stride}))
		C.sws_scale(
			ctx,
			srcSlice,
			srcStride,
			0,
			g.avcContext.height,
			dst,
			dstStride,
		)
		// Free the scaler; it was previously leaked on every call.
		C.sws_freeContext(ctx)
		break
	}
	return img, nil
}
コード例 #10
0
ファイル: decoder.go プロジェクト: reusee/player
// Start spins up the decoder: it allocates a pool of pre-sized YUV420P
// frames, launches a goroutine that reads packets and decodes/scales video
// and decodes/resamples audio, and launches a second goroutine that paces
// decoded video frames against the decoder's timer. It returns self for
// chaining.
func (self *Decoder) Start(videoStream, audioStream *C.AVStream,
	scaleWidth, scaleHeight C.int) *Decoder {

	self.running = true
	// NOTE(review): multiplying by AV_TIME_BASE then dividing by 1000
	// looks dimensionally suspicious for a time.Duration — confirm the
	// intended unit conversion.
	self.duration = time.Duration(self.FormatContext.duration * C.AV_TIME_BASE / 1000)
	vCodecCtx := videoStream.codec
	aCodecCtx := audioStream.codec

	self.durationPerSample = time.Second / time.Duration(aCodecCtx.sample_rate)

	// frame pool
	// Fixed pool of reusable output frames; each gets its own pixel
	// buffer sized for a YUV420P image at the scaled dimensions.
	poolSize := 16
	pool := make(chan *C.AVFrame, poolSize)
	self.pool = pool
	numBytes := C.size_t(C.avpicture_get_size(C.PIX_FMT_YUV420P, scaleWidth, scaleHeight))
	for i := 0; i < poolSize; i++ {
		frame := C.av_frame_alloc()
		self.frames = append(self.frames, frame)
		buffer := (*C.uint8_t)(unsafe.Pointer(C.av_malloc(numBytes)))
		self.buffers = append(self.buffers, buffer)
		C.avpicture_fill((*C.AVPicture)(unsafe.Pointer(frame)), buffer, C.PIX_FMT_YUV420P,
			scaleWidth, scaleHeight)
		pool <- frame
	}

	// decode
	self.frameChan = make(chan *C.AVFrame, 512)
	go func() {
		// Pin this goroutine to one OS thread for the cgo decode loop.
		runtime.LockOSThread()

		// scale context
		scaleContext := C.sws_getCachedContext(nil, vCodecCtx.width, vCodecCtx.height, vCodecCtx.pix_fmt,
			scaleWidth, scaleHeight, C.PIX_FMT_YUV420P, C.SWS_LANCZOS, nil, nil, nil)
		if scaleContext == nil {
			log.Fatal("get scale context failed")
		}

		// resample context
		// Audio is resampled to stereo float samples at the source rate.
		resampleContext := C.swr_alloc_set_opts(nil,
			C.AV_CH_LAYOUT_STEREO, C.AV_SAMPLE_FMT_FLT, aCodecCtx.sample_rate,
			C.int64_t(aCodecCtx.channel_layout), aCodecCtx.sample_fmt, aCodecCtx.sample_rate,
			0, nil)
		if resampleContext == nil {
			log.Fatal("get resample context failed")
		}
		C.swr_init(resampleContext)

		var packet C.AVPacket
		var frameFinished C.int
		var pts int64
		var packetTime time.Duration
		vFrame := C.av_frame_alloc()
		aFrame := C.av_frame_alloc()
		videoIndex := videoStream.index
		audioIndex := audioStream.index
		resampleBuffer := (*C.uint8_t)(C.av_malloc(4096 * 8))
		resampleBufferp := &resampleBuffer

		self.Timer = NewTimer()

		// decode
		for self.running {

			// seek
			// A pending seek request (seekTarget > 0) is serviced at the
			// top of every iteration; codec buffers are flushed so stale
			// frames are not emitted after the jump.
			if self.seekTarget > 0 {
				if C.av_seek_frame(self.FormatContext, -1,
					C.int64_t(float64(self.seekNext)/float64(time.Second)*float64(C.AV_TIME_BASE)),
					C.AVSEEK_FLAG_BACKWARD) < 0 {
					log.Fatal("seek error")
				}
				for _, codecCtx := range self.openedCodecs {
					C.avcodec_flush_buffers(codecCtx)
				}
				p("frame seek done\n")
			}

		read_packet:
			// read packet
			// Free the previous iteration's packet before reusing the
			// struct for the next read.
			C.av_free_packet(&packet)
			if C.av_read_frame(self.FormatContext, &packet) < 0 { // read packet
				log.Fatal("read frame error") //TODO stop gracefully
			}

			// get packet time
			if packet.dts != C.AV_NOPTS_VALUE {
				pts = int64(packet.dts)
			} else {
				pts = 0
			}
			// Translate the stream-relative pts into wall-clock duration
			// using the owning stream's time base.
			if packet.stream_index == videoIndex {
				packetTime = time.Duration(float64(pts) * float64(C.av_q2d(videoStream.time_base)) * float64(time.Second))
			} else if packet.stream_index == audioIndex {
				packetTime = time.Duration(float64(pts) * float64(C.av_q2d(audioStream.time_base)) * float64(time.Second))
			} else { // ignore packet
				goto read_packet
			}
			p("packet time %v at timer time %v\n", packetTime, self.Timer.Now())

			// check seek
			if self.seekTarget > 0 && packetTime > 0 { // if packet time cannot determined, skip
				if packetTime < self.seekTarget { // seek again
					self.seekNext += self.seekStep
					p("seek again %v\n", self.seekNext)
				} else { // seek ok
					p("seek ok\n")
					self.seekTarget = 0
					self.Timer.Jump(packetTime - self.Timer.Now())
				}
			}

			// decode
			if packet.stream_index == videoIndex { // decode video
				if C.avcodec_decode_video2(vCodecCtx, vFrame, &frameFinished, &packet) < 0 {
					continue // bad packet
				}
				if frameFinished <= 0 {
					goto read_packet // frame not complete
				}
				// Blocks until the pacing goroutine recycles a frame.
				bufFrame := <-pool        // get frame buffer
				C.sws_scale(scaleContext, // scale
					&vFrame.data[0], &vFrame.linesize[0], 0, vCodecCtx.height,
					&bufFrame.data[0], &bufFrame.linesize[0])
				bufFrame.pts = C.int64_t(packetTime) // set packet time
				self.frameChan <- bufFrame           // push to queue
				p("video frame %v\n", packetTime)

			} else if packet.stream_index == audioIndex { // decode audio
			decode_audio_packet:
				l := C.avcodec_decode_audio4(aCodecCtx, aFrame, &frameFinished, &packet)
				if l < 0 {
					continue // bad packet
				}
				if frameFinished <= 0 {
					goto read_packet // frame not complete
				}
				if frameFinished > 0 { // frame finished
					// Resample to stereo float; 8 = 2 channels * 4 bytes
					// per float sample.
					n := C.swr_convert(resampleContext, resampleBufferp, 4096,
						aFrame.extended_data, aFrame.nb_samples)
					if n != aFrame.nb_samples {
						log.Fatal("audio resample failed")
					}
					self.audioFrames <- &AudioFrame{
						time: packetTime,
						data: C.GoBytes(unsafe.Pointer(resampleBuffer), n*8),
					}
				}
				if l != packet.size { // multiple frame packet
					// Advance the packet's data pointer past the bytes
					// just consumed and decode the remainder.
					packet.size -= l
					packet.data = (*C.uint8_t)(unsafe.Pointer(uintptr(unsafe.Pointer(packet.data)) + uintptr(l)))
					goto decode_audio_packet
				}
				p("audio frame %v\n", packetTime)

			} else { // other stream
				goto read_packet
			}

		}
	}()

	// sync video
	// Pacing goroutine: delay frames that arrive early, drop frames that
	// are already late, and jump the timer for very large gaps.
	go func() {
		maxDelta := (time.Second / time.Duration(C.av_q2d(videoStream.r_frame_rate)/10))
		for frame := range self.frameChan {
			delta := time.Duration(frame.pts) - self.Timer.Now()
			p("video frame %v, delta %v, max delta %v\n", time.Duration(frame.pts), delta, maxDelta)
			if delta > 0 {
				if delta > maxDelta {
					self.Timer.Jump(delta)
					print("timer jumped\n")
				} else {
					time.Sleep(delta)
				}
			} else if delta < 0 { // drop frame
				self.RecycleFrame(frame)
				continue
			}
			self.timedFrames <- frame
		}
	}()

	return self
}