gotch


README


Overview

Gotch is a thin wrapper around the Pytorch C++ API (Libtorch). It exposes Libtorch's already optimized tensor APIs (roughly 1,400 of them) and dynamic graph computation with CUDA support, and provides idiomatic Go APIs for developing and implementing deep learning models in Go.

Some features are:

  • Comprehensive Pytorch tensor APIs (~ 1404)
  • Fully featured Pytorch dynamic graph computation
  • JIT interface to run models trained/saved using the PyTorch Python API
  • Load pretrained Pytorch models and run inference
  • Pure Go APIs to build and train neural network models with both CPU and GPU support
  • Most recent image models
  • NLP language models - Transformer models in a separate package built with gotch and a pure Go tokenizer

Gotch is in active development and may introduce breaking API changes. Feel free to open pull requests, report issues, or discuss any concerns. All contributions are welcome.

Dependencies

  • Libtorch C++ v1.7.0 (the Pytorch C++ library)

Installation

  • The default CUDA version is 10.1 if CUDA is available; otherwise the CPU version is used.
  • The default Pytorch C++ API version is 1.7.0.

NOTE: libtorch will be installed at /usr/local/lib

CPU
Step 1: Set up libtorch (skip this step if a valid libtorch is already installed on your machine)
    wget https://raw.githubusercontent.com/zonghaowang/gotch/master/setup-libtorch.sh
    chmod +x setup-libtorch.sh
    export CUDA_VER=cpu && bash setup-libtorch.sh

Update environment: on Debian/Ubuntu, add or update the following lines in your .bashrc file

    export GOTCH_LIBTORCH="/usr/local/lib/libtorch"
    export LIBRARY_PATH="$LIBRARY_PATH:$GOTCH_LIBTORCH/lib"
    export CPATH="$CPATH:$GOTCH_LIBTORCH/lib:$GOTCH_LIBTORCH/include:$GOTCH_LIBTORCH/include/torch/csrc/api/include"
    export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$GOTCH_LIBTORCH/lib"
Step 2: Set up gotch
    wget https://raw.githubusercontent.com/zonghaowang/gotch/master/setup-gotch.sh
    chmod +x setup-gotch.sh
    export CUDA_VER=cpu && export GOTCH_VER=v0.3.10 && bash setup-gotch.sh
GPU

NOTE: make sure your machine has a working CUDA installation.

Step 1: Set up libtorch (skip this step if a valid libtorch is already installed on your machine)
    wget https://raw.githubusercontent.com/zonghaowang/gotch/master/setup-libtorch.sh
    chmod +x setup-libtorch.sh

    # CUDA 10.1
    export CUDA_VER=10.1 && bash setup-libtorch.sh
    # CUDA 11.0
    export CUDA_VER=11.0 && bash setup-libtorch.sh

Update environment: on Debian/Ubuntu, add or update the following lines in your .bashrc file

    export GOTCH_LIBTORCH="/usr/local/lib/libtorch"
    export LIBRARY_PATH="$LIBRARY_PATH:$GOTCH_LIBTORCH/lib"
    export CPATH="$CPATH:$GOTCH_LIBTORCH/lib:$GOTCH_LIBTORCH/include:$GOTCH_LIBTORCH/include/torch/csrc/api/include"
    export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$GOTCH_LIBTORCH/lib:/usr/lib64-nvidia:/usr/local/cuda-${CUDA_VERSION}/lib64"
Step 2: Set up gotch
    wget https://raw.githubusercontent.com/zonghaowang/gotch/master/setup-gotch.sh
    chmod +x setup-gotch.sh
    # CUDA 10.1
    export CUDA_VER=10.1 && export GOTCH_VER=v0.3.10 && bash setup-gotch.sh
    # CUDA 11.0
    export CUDA_VER=11.0 && export GOTCH_VER=v0.3.10 && bash setup-gotch.sh
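
After setup, you can quickly verify that gotch builds and links against libtorch. This is just a sanity-check sketch (not part of the setup scripts); it uses the package-level CUDA variable documented in the Variables section below:

    package main

    import (
        "fmt"

        "github.com/zonghaowang/gotch"
    )

    func main() {
        // If this builds and runs, cgo found libtorch via the environment variables above.
        fmt.Println("CUDA available :", gotch.CUDA.IsAvailable())
        fmt.Println("Cudnn available:", gotch.CUDA.CudnnIsAvailable())
    }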

Examples

Basic tensor operations
import (
	"fmt"

	"github.com/zonghaowang/gotch"
	ts "github.com/zonghaowang/gotch/tensor"
)

func basicOps() {

xs := ts.MustRand([]int64{3, 5, 6}, gotch.Float, gotch.CPU)
fmt.Printf("%8.3f\n", xs)
fmt.Printf("%i", xs)

/*
(1,.,.) =
   0.391     0.055     0.638     0.514     0.757     0.446  
   0.817     0.075     0.437     0.452     0.077     0.492  
   0.504     0.945     0.863     0.243     0.254     0.640  
   0.850     0.132     0.763     0.572     0.216     0.116  
   0.410     0.660     0.156     0.336     0.885     0.391  

(2,.,.) =
   0.952     0.731     0.380     0.390     0.374     0.001  
   0.455     0.142     0.088     0.039     0.862     0.939  
   0.621     0.198     0.728     0.914     0.168     0.057  
   0.655     0.231     0.680     0.069     0.803     0.243  
   0.853     0.729     0.983     0.534     0.749     0.624  

(3,.,.) =
   0.734     0.447     0.914     0.956     0.269     0.000  
   0.427     0.034     0.477     0.535     0.440     0.972  
   0.407     0.945     0.099     0.184     0.778     0.058  
   0.482     0.996     0.085     0.605     0.282     0.671  
   0.887     0.029     0.005     0.216     0.354     0.262  



TENSOR INFO:
        Shape:          [3 5 6]
        DType:          float32
        Device:         {CPU 1}
        Defined:        true
*/

// Basic tensor operations
ts1 := ts.MustArange(ts.IntScalar(6), gotch.Int64, gotch.CPU).MustView([]int64{2, 3}, true)
defer ts1.MustDrop()
ts2 := ts.MustOnes([]int64{3, 4}, gotch.Int64, gotch.CPU)
defer ts2.MustDrop()

mul := ts1.MustMatmul(ts2, false)
defer mul.MustDrop()

fmt.Printf("ts1:\n%2d", ts1)
fmt.Printf("ts2:\n%2d", ts2)
fmt.Printf("mul tensor (ts1 x ts2):\n%2d", mul)

/*
ts1:
 0   1   2  
 3   4   5  

ts2:
 1   1   1   1  
 1   1   1   1  
 1   1   1   1  

mul tensor (ts1 x ts2):
 3   3   3   3  
12  12  12  12  
*/


// In-place operation
ts3 := ts.MustOnes([]int64{2, 3}, gotch.Float, gotch.CPU)
fmt.Printf("Before:\n%v", ts3)
ts3.MustAdd1_(ts.FloatScalar(2.0))
fmt.Printf("After (ts3 + 2.0):\n%v", ts3)

/*
Before:
1  1  1  
1  1  1  

After (ts3 + 2.0):
3  3  3  
3  3  3  
*/
}
Simplified convolutional neural network
import (
    "fmt"

    "github.com/zonghaowang/gotch"
    "github.com/zonghaowang/gotch/nn"
    ts "github.com/zonghaowang/gotch/tensor"
)

type Net struct {
    conv1 *nn.Conv2D
    conv2 *nn.Conv2D
    fc    *nn.Linear
}

func newNet(vs *nn.Path) *Net {
    conv1 := nn.NewConv2D(vs, 1, 16, 2, nn.DefaultConv2DConfig())
    conv2 := nn.NewConv2D(vs, 16, 10, 2, nn.DefaultConv2DConfig())
    fc := nn.NewLinear(vs, 10, 10, nn.DefaultLinearConfig())

    return &Net{
        conv1,
        conv2,
        fc,
    }
}

func (n Net) ForwardT(xs *ts.Tensor, train bool) *ts.Tensor {
    xs = xs.MustView([]int64{-1, 1, 8, 8}, false)

    outC1 := xs.Apply(n.conv1)
    outMP1 := outC1.MaxPool2DDefault(2, true)
    defer outMP1.MustDrop()

    outC2 := outMP1.Apply(n.conv2)
    outMP2 := outC2.MaxPool2DDefault(2, true)
    outView2 := outMP2.MustView([]int64{-1, 10}, true)
    defer outView2.MustDrop()

    outFC := outView2.Apply(n.fc)
    return outFC.MustRelu(true)
}

func main() {

    vs := nn.NewVarStore(gotch.CPU)
    net := newNet(vs.Root())

    xs := ts.MustOnes([]int64{8, 8}, gotch.Float, gotch.CPU)

    logits := net.ForwardT(xs, false)
    fmt.Printf("Logits: %0.3f", logits)
}

// Logits: 0.000  0.000  0.000  0.225  0.321  0.147  0.000  0.207  0.000  0.000
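
The same model can run on a GPU when one is available. Below is a small variation of the main function above (a sketch only; it assumes a CUDA-enabled libtorch install and uses gotch.CudaIfAvailable, documented in the Documentation section below):

    func main() {

        // Pick the first CUDA device if available, otherwise fall back to CPU.
        device := gotch.CudaIfAvailable()

        vs := nn.NewVarStore(device)
        net := newNet(vs.Root())

        // Create the input on the same device as the model variables.
        xs := ts.MustOnes([]int64{8, 8}, gotch.Float, device)

        logits := net.ForwardT(xs, false)
        fmt.Printf("Logits: %0.3f", logits)
    }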

Play with gotch on Google Colab or locally

Getting Started

License

Gotch is Apache 2.0 licensed.

Acknowledgement

  • This project was inspired by and borrows many concepts from tch-rs, the Libtorch Rust binding.

Documentation

Index

Constants

This section is empty.

Variables

var (
	CPU  Device = Device{Name: "CPU", Value: -1}
	CUDA Cuda   = Cuda{Name: "CUDA", Value: 0}
)

Functions

func DTypeSize

func DTypeSize(dt DType) (retVal uint, err error)

DTypeSize returns the DType size in bytes.
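
A minimal usage sketch (assuming "fmt", "log" and the gotch root package are imported as in the README examples; the DType values are listed under type DType below):

    for _, dt := range []gotch.DType{gotch.Uint8, gotch.Int64, gotch.Float, gotch.Double} {
        size, err := gotch.DTypeSize(dt)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%v: %d bytes\n", dt, size)
    }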

func ElementGoType

func ElementGoType(data interface{}) (retVal reflect.Type, err error)

ElementGoType infers and returns Go type of element in given data

func IsSupportedScalar

func IsSupportedScalar(k reflect.Kind) bool

IsSupportedScalar checks whether a given scalar type is supported. TODO: check that the input is a scalar.

func ToGoType

func ToGoType(dtype DType) (retVal reflect.Type, err error)

ToGoType infers and returns supported equivalent Go type from given DType

func TypeOf

func TypeOf(dt DType, shape []int64) (retVal reflect.Type, err error)

TypeOf infers and returns element Go type from given tensor DType and shape
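
A sketch showing how TypeOf, ToGoType and ElementGoType relate (assuming "fmt", "log" and the gotch root package are imported):

    // ToGoType maps a tensor DType to its Go element type.
    goType, err := gotch.ToGoType(gotch.Float)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(goType)

    // TypeOf builds the Go type of a whole tensor from its DType and shape.
    tensorType, err := gotch.TypeOf(gotch.Float, []int64{2, 3})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(tensorType)

    // ElementGoType goes the other way: it inspects Go data.
    elemType, err := gotch.ElementGoType([]float64{1, 2, 3})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(elemType)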

Types

type CInt

type CInt = int32

CInt is equivalent to the C type int. Its Go type is int32.

func DType2CInt

func DType2CInt(dt DType) (retVal CInt, err error)

type Cuda

type Cuda Device

func (Cuda) CudnnIsAvailable

func (cu Cuda) CudnnIsAvailable() bool

CudnnIsAvailable returns true if cudnn support is available.

func (Cuda) CudnnSetBenchmark

func (cu Cuda) CudnnSetBenchmark(b bool)

CudnnSetBenchmark sets cudnn benchmark mode

When set, cudnn will try to optimize the generators during the first network runs and then use the optimized architecture in the following runs. This can result in significant performance improvements.

func (Cuda) DeviceCount

func (cu Cuda) DeviceCount() int64

DeviceCount returns the number of GPUs that can be used.

func (Cuda) IsAvailable

func (cu Cuda) IsAvailable() bool

IsAvailable returns true if CUDA support is available.
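
Putting the Cuda helpers together (a sketch; assuming "fmt" and the gotch root package are imported):

    cu := gotch.CUDA // package-level Cuda value, see Variables above
    if cu.IsAvailable() {
        fmt.Printf("CUDA is available with %d device(s)\n", cu.DeviceCount())
        if cu.CudnnIsAvailable() {
            // Let cudnn search for optimized algorithms during the first runs.
            cu.CudnnSetBenchmark(true)
        }
    } else {
        fmt.Println("CUDA is not available; running on CPU")
    }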

type DType

type DType struct {
	reflect.Type
}

DType represents the different kinds of element that a tensor can hold. It has an embedded `reflect.Type` for type reflection.

var (
	Uint8 DType = DType{reflect.TypeOf(uint8(1))} // 0
	Int8  DType = DType{reflect.TypeOf(int8(1))}  // 1
	Int16 DType = DType{reflect.TypeOf(int16(1))} // 2
	Int   DType = DType{reflect.TypeOf(int32(1))} // 3
	Int64 DType = DType{reflect.TypeOf(int64(1))} // 4
	// Half       DType   = DType{reflect.TypeOf(GoFloat16(1))}     // 5
	Float  DType = DType{reflect.TypeOf(float32(1))} // 6
	Double DType = DType{reflect.TypeOf(float64(1))} // 7
	// ComplexHalf DType  = DType{reflect.TypeOf(GoComplexHalf(1))} // 8
	// ComplexFloat DType  = DType{reflect.TypeOf(complex64(1))}  // 9
	// ComplexDouble DType = DType{reflect.TypeOf(complex128(1))} // 10
	Bool DType = DType{reflect.TypeOf(true)} // 11
)

TODO: double-check these Torch DType to Go type mappings.

func CInt2DType

func CInt2DType(v CInt) (dtype DType, err error)

func DTypeFromData

func DTypeFromData(data interface{}) (retVal DType, err error)

DTypeFromData infers and returns the equivalent DType from the given data.

func DataDType

func DataDType(v interface{}, shape []int64) (retVal DType, err error)

DataDType infers and returns data type of tensor data

func ElementDType

func ElementDType(v interface{}) (retVal DType, err error)

ElementDType infers and returns the DType of a given element.

func ToDType

func ToDType(typ reflect.Type) (retVal DType, err error)

ToDType infers and returns supported equivalent DType from given Go type

func (DType) CInt

func (dt DType) CInt() (retVal CInt)
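
A sketch of round-tripping between Go data, DType and the CInt code used at the C level (assuming "fmt", "log", "reflect" and the gotch root package are imported):

    // Infer a DType from Go data.
    dt, err := gotch.DTypeFromData([]float64{1.0, 2.0, 3.0})
    if err != nil {
        log.Fatal(err)
    }

    // Convert a reflect.Type to a DType.
    dt2, err := gotch.ToDType(reflect.TypeOf(int64(0)))
    if err != nil {
        log.Fatal(err)
    }

    // Move between a DType and its CInt code.
    c := dt.CInt()
    back, err := gotch.CInt2DType(c)
    if err != nil {
        log.Fatal(err)
    }

    fmt.Println(dt, dt2, c, back)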

type DTypeDevice

type DTypeDevice struct {
	DType  DType
	Device Device
}

type Device

type Device struct {
	Name  string
	Value int
}

func CudaBuilder

func CudaBuilder(v uint) Device

func CudaIfAvailable

func CudaIfAvailable() Device

CudaIfAvailable returns a GPU device if available, else CPU.

func NewCuda

func NewCuda() Device

NewCuda creates a CUDA device (the default one) if available. It will panic if CUDA is not available.

func (Device) CInt

func (d Device) CInt() CInt

func (Device) CudaIfAvailable

func (d Device) CudaIfAvailable() Device

CudaIfAvailable returns a GPU device if available, otherwise defaults to CPU.

func (Device) IsCuda

func (d Device) IsCuda() bool

IsCuda returns whether device is a Cuda device

func (Device) OfCInt

func (d Device) OfCInt(v CInt) Device
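
A sketch of typical device selection (assuming "fmt" and the gotch root package are imported; treating the CudaBuilder argument as a GPU ordinal is an assumption based on its signature):

    // Prefer a GPU when one is available, otherwise fall back to CPU.
    device := gotch.CudaIfAvailable()
    fmt.Println("using a CUDA device:", device.IsCuda())

    // Target a specific GPU explicitly (assumption: index 1 selects the second GPU).
    if gotch.CUDA.DeviceCount() > 1 {
        second := gotch.CudaBuilder(1)
        fmt.Println(second.Name, second.Value)
    }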

Directories

Path Synopsis
example
jit
yolo/freetype
The freetype package provides a convenient API to draw text onto an image.
NOTE: functions in this file would be automatically generated and named as `c-generated.go`
aug
