Example #1
// update fills the OpenGL buffer if needed, that is, when the
// bufferdataClean flag is false. It is safe to call this method every frame
// via the concrete type's UpdateBuffer method: most of the time it will just
// return immediately.
func (buffer *baseBuffer) update(vertexdata interface{}) {
	if buffer.bufferdataClean {
		return
	}
	if buffer.name == 0 {
		panic("tried to update buffer 0")
	}

	// Convert the Go-friendly data into OpenGL-friendly data.
	oldSize := len(buffer.bufferdata)
	bufferdata := new(bytes.Buffer)
	err := binary.Write(bufferdata, endianness, vertexdata)
	if err != nil {
		panic(err)
	}
	buffer.bufferdata = bufferdata.Bytes()
	buffer.bufferdataClean = true
	newSize := len(buffer.bufferdata)

	// Should we make the buffer bigger?
	needBigger := newSize > oldSize

	buffer.bind()

	if needBigger {
		// (Re)allocate a buffer.
		gl.BufferData(
			buffer.target,
			len(buffer.bufferdata),
			buffer.bufferdata,
			buffer.usage,
		)
		if err := CheckGlError(); err != nil {
			err.Description = "gl.BufferData"
			panic(err)
		}
	} else {
		// Re-use existing buffer.
		gl.BufferSubData(
			buffer.target,
			0,
			len(buffer.bufferdata),
			buffer.bufferdata,
		)
		if err := CheckGlError(); err != nil {
			err.Description = "gl.BufferSubData"
			panic(err)
		}
	}

	buffer.unbind()
}
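The doc comment above refers to a concrete type's UpdateBuffer method driving update once per frame. baseBuffer, bufferdataClean, and update come from the example; the vertexBuffer type, its vertices field, and SetData below are made-up names, so treat this as a minimal sketch of how such a wrapper might look rather than the actual concrete type:

// Sketch only: vertexBuffer, vertices, and SetData are hypothetical;
// baseBuffer, bufferdataClean, and update are from Example #1.
type vertexBuffer struct {
	baseBuffer
	vertices []float32 // CPU-side copy of the vertex data
}

// SetData replaces the CPU-side data and marks the GPU copy as stale.
// No OpenGL call happens here; the upload is deferred to UpdateBuffer.
func (b *vertexBuffer) SetData(vertices []float32) {
	b.vertices = vertices
	b.bufferdataClean = false
}

// UpdateBuffer is cheap to call every frame: while bufferdataClean is still
// true, update returns immediately without touching OpenGL.
func (b *vertexBuffer) UpdateBuffer() {
	b.update(b.vertices)
}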
Example #2
// buffer uploads the mesh data to the GPU. It calls glBufferData when the
// data size differs from what is currently allocated (to (re)allocate the
// buffer store) and glBufferSubData when the existing store can be reused.
// Each supported slice type needs its own case so that len(v) can be taken
// on a concrete slice value rather than on the interface.
func (a *Attr) buffer() {
	switch v := a.data.(type) {
	case []int8:
		size := len(v) * a.stride

		if size != a.gpuSize {
			gl.BufferData(a.target, size, v, a.usage)
			a.gpuSize = size
		} else {
			gl.BufferSubData(a.target, 0, size, v)
		}
	case []uint8:
		size := len(v) * a.stride

		if size != a.gpuSize {
			gl.BufferData(a.target, size, v, a.usage)
			a.gpuSize = size
		} else {
			gl.BufferSubData(a.target, 0, size, v)
		}
	case []int16:
		size := len(v) * a.stride

		if size != a.gpuSize {
			gl.BufferData(a.target, size, v, a.usage)
			a.gpuSize = size
		} else {
			gl.BufferSubData(a.target, 0, size, v)
		}
	case []uint16:
		size := len(v) * a.stride

		if size != a.gpuSize {
			gl.BufferData(a.target, size, v, a.usage)
			a.gpuSize = size
		} else {
			gl.BufferSubData(a.target, 0, size, v)
		}
	case []int32:
		size := len(v) * a.stride

		if size != a.gpuSize {
			gl.BufferData(a.target, size, v, a.usage)
			a.gpuSize = size
		} else {
			gl.BufferSubData(a.target, 0, size, v)
		}
	case []uint32:
		size := len(v) * a.stride

		if size != a.gpuSize {
			gl.BufferData(a.target, size, v, a.usage)
			a.gpuSize = size
		} else {
			gl.BufferSubData(a.target, 0, size, v)
		}
	case []float32:
		size := len(v) * a.stride

		if size != a.gpuSize {
			gl.BufferData(a.target, size, v, a.usage)
			a.gpuSize = size
		} else {
			gl.BufferSubData(a.target, 0, size, v)
		}
	case []float64:
		size := len(v) * a.stride

		if size != a.gpuSize {
			gl.BufferData(a.target, size, v, a.usage)
			a.gpuSize = size
		} else {
			gl.BufferSubData(a.target, 0, size, v)
		}
	}

	// The GPU copy is now in sync with a.data.
	a.invalid = false
}
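Example #2 uses the same deferred-upload idea: a.invalid marks stale data, and buffer re-syncs the GPU copy. The Attr fields data and invalid and the buffer method come from the example; SetData and Upload below are hypothetical helpers, sketched only to show how the flag would typically be driven:

// Sketch only: SetData and Upload are assumed names; data, invalid, and
// buffer are from Example #2.
func (a *Attr) SetData(data interface{}) {
	// data should be one of the slice types handled in buffer's type switch.
	a.data = data
	a.invalid = true
}

// Upload re-buffers the data only when it changed since the last upload, so
// it is cheap to call every time the attribute is bound for drawing. It
// assumes the buffer object is already bound to a.target.
func (a *Attr) Upload() {
	if a.invalid {
		a.buffer()
	}
}

Both examples make the same trade-off: glBufferData (re)allocates the buffer store, so it is only issued when the size changes, while same-sized updates go through glBufferSubData and reuse the existing storage. Example #1 only ever grows the buffer; Example #2 reallocates whenever the size differs from what is currently on the GPU.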