Documentation ¶
Overview ¶
Package ipfscluster is the heart of the IPFS Cluster implementation, gluing together all the subcomponents and performing the core functionality.
This package also provides the Cluster Go API through the Cluster object, which allows building and controlling a cluster programmatically.
For an example of how to initialize the components and the Cluster object, see cmd/rep-mgr-follow and cmd/rep-mgr-service.
Index ¶
- Constants
- Variables
- func DebugPrint(skip int)
- func DecodeClusterSecret(hexSecret string) ([]byte, error)
- func EncodeProtectorKey(secretBytes []byte) string
- func NewClusterHost(ctx context.Context, ident *config.Identity, cfg *Config, ds ds.Datastore) (host.Host, *pubsub.PubSub, *dual.DHT, error)
- func PeersFromMultiaddrs(addrs []ma.Multiaddr) []peer.ID
- func RPCServiceID(rpcSvc interface{}) string
- func SetFacilityLogLevel(f, l string)
- type API
- type Cluster
- func (c *Cluster) AddFile(ctx context.Context, reader *multipart.Reader, params api.AddParams) (api.Cid, error)
- func (c *Cluster) Alerts() []api.Alert
- func (c *Cluster) ConnectGraph() (api.ConnectGraph, error)
- func (c *Cluster) Done() <-chan struct{}
- func (c *Cluster) ID(ctx context.Context) api.ID
- func (c *Cluster) Join(ctx context.Context, addr ma.Multiaddr) error
- func (c *Cluster) PeerAdd(ctx context.Context, pid peer.ID) (*api.ID, error)
- func (c *Cluster) PeerRemove(ctx context.Context, pid peer.ID) error
- func (c *Cluster) Peers(ctx context.Context, out chan<- api.ID)
- func (c *Cluster) Pin(ctx context.Context, h api.Cid, opts api.PinOptions) (api.Pin, error)
- func (c *Cluster) PinGet(ctx context.Context, h api.Cid) (api.Pin, error)
- func (c *Cluster) PinPath(ctx context.Context, path string, opts api.PinOptions) (api.Pin, error)
- func (c *Cluster) PinUpdate(ctx context.Context, from api.Cid, to api.Cid, opts api.PinOptions) (api.Pin, error)
- func (c *Cluster) Pins(ctx context.Context, out chan<- api.Pin) error
- func (c *Cluster) Ready() <-chan struct{}
- func (c *Cluster) Recover(ctx context.Context, h api.Cid) (api.GlobalPinInfo, error)
- func (c *Cluster) RecoverAll(ctx context.Context, out chan<- api.GlobalPinInfo) error
- func (c *Cluster) RecoverAllLocal(ctx context.Context, out chan<- api.PinInfo) error
- func (c *Cluster) RecoverLocal(ctx context.Context, h api.Cid) (api.PinInfo, error)
- func (c *Cluster) RepoGC(ctx context.Context) (api.GlobalRepoGC, error)
- func (c *Cluster) RepoGCLocal(ctx context.Context) (api.RepoGC, error)
- func (c *Cluster) Shutdown(ctx context.Context) error
- func (c *Cluster) StateSync(ctx context.Context) error
- func (c *Cluster) Status(ctx context.Context, h api.Cid) (api.GlobalPinInfo, error)
- func (c *Cluster) StatusAll(ctx context.Context, filter api.TrackerStatus, out chan<- api.GlobalPinInfo) error
- func (c *Cluster) StatusAllLocal(ctx context.Context, filter api.TrackerStatus, out chan<- api.PinInfo) error
- func (c *Cluster) StatusLocal(ctx context.Context, h api.Cid) api.PinInfo
- func (c *Cluster) Unpin(ctx context.Context, h api.Cid) (api.Pin, error)
- func (c *Cluster) UnpinPath(ctx context.Context, path string) (api.Pin, error)
- func (c *Cluster) Version() string
- type ClusterRPCAPI
- func (rpcapi *ClusterRPCAPI) Alerts(ctx context.Context, in struct{}, out *[]api.Alert) error
- func (rpcapi *ClusterRPCAPI) BlockAllocate(ctx context.Context, in api.Pin, out *[]peer.ID) error
- func (rpcapi *ClusterRPCAPI) ConnectGraph(ctx context.Context, in struct{}, out *api.ConnectGraph) error
- func (rpcapi *ClusterRPCAPI) GetKnodeList(ctx context.Context, args map[string]string, out *api.KNodeAddrs) error
- func (rpcapi *ClusterRPCAPI) GetNodesForCid(ctx context.Context, cids []api.Cid, out *map[api.Cid][]api.Node) error
- func (rpcapi *ClusterRPCAPI) ID(ctx context.Context, in struct{}, out *api.ID) error
- func (rpcapi *ClusterRPCAPI) IDStream(ctx context.Context, in <-chan struct{}, out chan<- api.ID) error
- func (rpcapi *ClusterRPCAPI) IPFSID(ctx context.Context, in peer.ID, out *api.IPFSID) error
- func (rpcapi *ClusterRPCAPI) Join(ctx context.Context, in api.Multiaddr, out *struct{}) error
- func (rpcapi *ClusterRPCAPI) PeerAdd(ctx context.Context, in peer.ID, out *api.ID) error
- func (rpcapi *ClusterRPCAPI) PeerRemove(ctx context.Context, in peer.ID, out *struct{}) error
- func (rpcapi *ClusterRPCAPI) Peers(ctx context.Context, in <-chan struct{}, out chan<- api.ID) error
- func (rpcapi *ClusterRPCAPI) PeersWithFilter(ctx context.Context, in <-chan []peer.ID, out chan<- api.ID) error
- func (rpcapi *ClusterRPCAPI) Pin(ctx context.Context, in api.Pin, out *api.Pin) error
- func (rpcapi *ClusterRPCAPI) PinGet(ctx context.Context, in api.Cid, out *api.Pin) error
- func (rpcapi *ClusterRPCAPI) PinPath(ctx context.Context, in api.PinPath, out *api.Pin) error
- func (rpcapi *ClusterRPCAPI) Pins(ctx context.Context, in <-chan struct{}, out chan<- api.Pin) error
- func (rpcapi *ClusterRPCAPI) Recover(ctx context.Context, in api.Cid, out *api.GlobalPinInfo) error
- func (rpcapi *ClusterRPCAPI) RecoverAll(ctx context.Context, in <-chan struct{}, out chan<- api.GlobalPinInfo) error
- func (rpcapi *ClusterRPCAPI) RecoverAllLocal(ctx context.Context, in <-chan struct{}, out chan<- api.PinInfo) error
- func (rpcapi *ClusterRPCAPI) RecoverLocal(ctx context.Context, in api.Cid, out *api.PinInfo) error
- func (rpcapi *ClusterRPCAPI) RepoGC(ctx context.Context, in struct{}, out *api.GlobalRepoGC) error
- func (rpcapi *ClusterRPCAPI) RepoGCLocal(ctx context.Context, in struct{}, out *api.RepoGC) error
- func (rpcapi *ClusterRPCAPI) SendInformerMetrics(ctx context.Context, in struct{}, out *struct{}) error
- func (rpcapi *ClusterRPCAPI) SendInformersMetrics(ctx context.Context, in struct{}, out *struct{}) error
- func (rpcapi *ClusterRPCAPI) Status(ctx context.Context, in api.Cid, out *api.GlobalPinInfo) error
- func (rpcapi *ClusterRPCAPI) StatusAll(ctx context.Context, in <-chan api.TrackerStatus, out chan<- api.GlobalPinInfo) error
- func (rpcapi *ClusterRPCAPI) StatusAllLocal(ctx context.Context, in <-chan api.TrackerStatus, out chan<- api.PinInfo) error
- func (rpcapi *ClusterRPCAPI) StatusLocal(ctx context.Context, in api.Cid, out *api.PinInfo) error
- func (rpcapi *ClusterRPCAPI) Unpin(ctx context.Context, in api.Pin, out *api.Pin) error
- func (rpcapi *ClusterRPCAPI) UnpinPath(ctx context.Context, in api.PinPath, out *api.Pin) error
- func (rpcapi *ClusterRPCAPI) Version(ctx context.Context, in struct{}, out *api.Version) error
- type Component
- type Config
- func (cfg *Config) ApplyEnvVars() error
- func (cfg *Config) ConfigKey() string
- func (cfg *Config) Default() error
- func (cfg *Config) GetPeerstorePath() string
- func (cfg *Config) LoadJSON(raw []byte) error
- func (cfg *Config) ToDisplayJSON() ([]byte, error)
- func (cfg *Config) ToJSON() (raw []byte, err error)
- func (cfg *Config) Validate() error
- type ConnMgrConfig
- type Consensus
- type ConsensusRPCAPI
- func (rpcapi *ConsensusRPCAPI) AddPeer(ctx context.Context, in peer.ID, out *struct{}) error
- func (rpcapi *ConsensusRPCAPI) LogPin(ctx context.Context, in api.Pin, out *struct{}) error
- func (rpcapi *ConsensusRPCAPI) LogUnpin(ctx context.Context, in api.Pin, out *struct{}) error
- func (rpcapi *ConsensusRPCAPI) Peers(ctx context.Context, in struct{}, out *[]peer.ID) error
- func (rpcapi *ConsensusRPCAPI) RmPeer(ctx context.Context, in peer.ID, out *struct{}) error
- type IPFSConnector
- type IPFSConnectorRPCAPI
- func (rpcapi *IPFSConnectorRPCAPI) BlockGet(ctx context.Context, in api.Cid, out *[]byte) error
- func (rpcapi *IPFSConnectorRPCAPI) BlockStream(ctx context.Context, in <-chan api.NodeWithMeta, out chan<- struct{}) error
- func (rpcapi *IPFSConnectorRPCAPI) ConfigKey(ctx context.Context, in string, out *interface{}) error
- func (rpcapi *IPFSConnectorRPCAPI) Pin(ctx context.Context, in api.Pin, out *struct{}) error
- func (rpcapi *IPFSConnectorRPCAPI) PinLs(ctx context.Context, in <-chan []string, out chan<- api.IPFSPinInfo) error
- func (rpcapi *IPFSConnectorRPCAPI) PinLsCid(ctx context.Context, in api.Pin, out *api.PinDetail) error
- func (rpcapi *IPFSConnectorRPCAPI) RepoStat(ctx context.Context, in struct{}, out *api.IPFSRepoStat) error
- func (rpcapi *IPFSConnectorRPCAPI) Resolve(ctx context.Context, in string, out *api.Cid) error
- func (rpcapi *IPFSConnectorRPCAPI) SwarmPeers(ctx context.Context, in struct{}, out *[]peer.ID) error
- func (rpcapi *IPFSConnectorRPCAPI) Unpin(ctx context.Context, in api.Pin, out *struct{}) error
- type Informer
- type PeerMonitor
- type PeerMonitorRPCAPI
- type Peered
- type PinAllocator
- type PinTracker
- type PinTrackerRPCAPI
- func (rpcapi *PinTrackerRPCAPI) PinQueueSize(ctx context.Context, in struct{}, out *int64) error
- func (rpcapi *PinTrackerRPCAPI) Recover(ctx context.Context, in api.Cid, out *api.PinInfo) error
- func (rpcapi *PinTrackerRPCAPI) RecoverAll(ctx context.Context, in <-chan struct{}, out chan<- api.PinInfo) error
- func (rpcapi *PinTrackerRPCAPI) Status(ctx context.Context, in api.Cid, out *api.PinInfo) error
- func (rpcapi *PinTrackerRPCAPI) StatusAll(ctx context.Context, in <-chan api.TrackerStatus, out chan<- api.PinInfo) error
- func (rpcapi *PinTrackerRPCAPI) Track(ctx context.Context, in api.Pin, out *struct{}) error
- func (rpcapi *PinTrackerRPCAPI) Untrack(ctx context.Context, in api.Pin, out *struct{}) error
- type RPCEndpointType
- type Tracer
Constants ¶
const (
	DefaultEnableRelayHop        = true
	DefaultStateSyncInterval     = 5 * time.Minute
	DefaultPinRecoverInterval    = 12 * time.Minute
	DefaultMonitorPingInterval   = 15 * time.Second
	DefaultPeerWatchInterval     = 5 * time.Second
	DefaultReplicationFactor     = -1
	DefaultLeaveOnShutdown       = false
	DefaultPinOnlyOnTrustedPeers = false
	DefaultDisableRepinning      = true
	DefaultPeerstoreFile         = "peerstore"
	DefaultConnMgrHighWater      = 400
	DefaultConnMgrLowWater       = 100
	DefaultConnMgrGracePeriod    = 2 * time.Minute
	DefaultDialPeerTimeout       = 3 * time.Second
	DefaultFollowerMode          = false
	DefaultMDNSInterval          = 10 * time.Second
)
Configuration defaults
Variables ¶
var DefaultListenAddrs = []string{
"/ip4/0.0.0.0/tcp/9096",
"/ip4/0.0.0.0/udp/9096/quic",
}
DefaultListenAddrs contains TCP and QUIC listen addresses.
var DefaultRPCPolicy = map[string]RPCEndpointType{
	"Cluster.Alerts":               RPCClosed,
	"Cluster.BlockAllocate":        RPCClosed,
	"Cluster.ConnectGraph":         RPCClosed,
	"Cluster.ID":                   RPCOpen,
	"Cluster.IDStream":             RPCOpen,
	"Cluster.IPFSID":               RPCClosed,
	"Cluster.Join":                 RPCClosed,
	"Cluster.PeerAdd":              RPCOpen,
	"Cluster.PeerRemove":           RPCTrusted,
	"Cluster.Peers":                RPCTrusted,
	"Cluster.PeersWithFilter":      RPCClosed,
	"Cluster.Pin":                  RPCClosed,
	"Cluster.PinGet":               RPCClosed,
	"Cluster.PinPath":              RPCClosed,
	"Cluster.Pins":                 RPCClosed,
	"Cluster.PinStatus":            RPCClosed,
	"Cluster.Recover":              RPCClosed,
	"Cluster.RecoverAll":           RPCClosed,
	"Cluster.RecoverAllLocal":      RPCTrusted,
	"Cluster.RecoverLocal":         RPCTrusted,
	"Cluster.RepoGC":               RPCClosed,
	"Cluster.RepoGCLocal":          RPCTrusted,
	"Cluster.SendInformerMetrics":  RPCClosed,
	"Cluster.SendInformersMetrics": RPCClosed,
	"Cluster.Status":               RPCClosed,
	"Cluster.StatusAll":            RPCClosed,
	"Cluster.StatusAllLocal":       RPCClosed,
	"Cluster.StatusLocal":          RPCClosed,
	"Cluster.Unpin":                RPCClosed,
	"Cluster.UnpinPath":            RPCClosed,
	"Cluster.Version":              RPCOpen,
	"Cluster.GetKnodeList":         RPCClosed,
	"Cluster.GetNodesForCid":       RPCClosed,

	"PinTracker.PinQueueSize": RPCClosed,
	"PinTracker.Recover":      RPCTrusted,
	"PinTracker.RecoverAll":   RPCClosed,
	"PinTracker.Status":       RPCTrusted,
	"PinTracker.StatusAll":    RPCTrusted,
	"PinTracker.Track":        RPCClosed,
	"PinTracker.Untrack":      RPCClosed,

	"IPFSConnector.BlockGet":    RPCClosed,
	"IPFSConnector.BlockStream": RPCTrusted,
	"IPFSConnector.ConfigKey":   RPCClosed,
	"IPFSConnector.Pin":         RPCClosed,
	"IPFSConnector.PinLs":       RPCClosed,
	"IPFSConnector.PinLsCid":    RPCClosed,
	"IPFSConnector.RepoStat":    RPCTrusted,
	"IPFSConnector.Resolve":     RPCClosed,
	"IPFSConnector.SwarmPeers":  RPCTrusted,
	"IPFSConnector.Unpin":       RPCClosed,

	"Consensus.AddPeer":  RPCTrusted,
	"Consensus.LogPin":   RPCTrusted,
	"Consensus.LogUnpin": RPCTrusted,
	"Consensus.Peers":    RPCClosed,
	"Consensus.RmPeer":   RPCTrusted,

	"PeerMonitor.LatestMetrics": RPCClosed,
	"PeerMonitor.MetricNames":   RPCClosed,
}
DefaultRPCPolicy associates every RPC endpoint offered by cluster peers with an endpoint type. See rpcutil/policygen.go for a quick way to generate this map without missing any endpoint.
var LoggingFacilities = map[string]string{
"cluster": "INFO",
"restapi": "INFO",
"restapilog": "INFO",
"pinsvcapi": "INFO",
"pinsvcapilog": "INFO",
"ipfsproxy": "INFO",
"ipfsproxylog": "INFO",
"ipfshttp": "INFO",
"monitor": "INFO",
"dsstate": "INFO",
"raft": "INFO",
"crdt": "INFO",
"pintracker": "INFO",
"diskinfo": "INFO",
"tags": "INFO",
"apitypes": "INFO",
"config": "INFO",
"shardingdags": "INFO",
"singledags": "INFO",
"adder": "INFO",
"optracker": "INFO",
"pstoremgr": "INFO",
"allocator": "INFO",
"knodemanager": "INFO",
}
LoggingFacilities provides a list of logging identifiers used by cluster and their default logging level.
var LoggingFacilitiesExtra = map[string]string{
"p2p-gorpc": "ERROR",
"swarm2": "ERROR",
"libp2p-raft": "FATAL",
"raftlib": "ERROR",
"badger": "INFO",
}
LoggingFacilitiesExtra provides logging identifiers used by rep-mgr dependencies which may be useful to display, along with their default levels.
var ReadyTimeout = 30 * time.Second
ReadyTimeout specifies the time before giving up during startup (waiting for consensus to be ready). It may need adjustment depending on timeouts in the consensus layer.
Functions ¶
func DebugPrint ¶
func DebugPrint(skip int)
func DecodeClusterSecret ¶
DecodeClusterSecret parses a hex-encoded string, checks that it is exactly 32 bytes long, and returns its value as a byte slice.
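For instance, a 64-character hex secret (as produced by a tool like `openssl rand -hex 32`) round-trips through DecodeClusterSecret and EncodeProtectorKey. A minimal sketch, assuming this package is imported as ipfscluster and fmt/log come from the standard library:

func decodeSecretExample() {
	hexSecret := "2588d9f3dd93f84b41b87d11a2d8c410fc22e76dc7bbdbfcc56e3a82ebb13f68" // 64 hex characters = 32 bytes
	secret, err := ipfscluster.DecodeClusterSecret(hexSecret)
	if err != nil {
		log.Fatal(err)
	}
	// EncodeProtectorKey is the inverse operation.
	fmt.Println(ipfscluster.EncodeProtectorKey(secret) == hexSecret) // true
}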
func EncodeProtectorKey ¶
EncodeProtectorKey converts a byte slice to its hex string representation.
func NewClusterHost ¶
func NewClusterHost(
	ctx context.Context,
	ident *config.Identity,
	cfg *Config,
	ds ds.Datastore,
) (host.Host, *pubsub.PubSub, *dual.DHT, error)
NewClusterHost creates a fully-featured libp2p Host with the options from the provided cluster configuration. Using that host, it creates PubSub and DHT instances (persisting to the given datastore) for shared use by all cluster components. The returned host uses the DHT for routing. Relay and NATService are additionally set up for this host.
func PeersFromMultiaddrs ¶
PeersFromMultiaddrs returns all the different peers in the given addresses. Each peer will appear only once in the result, even if several multiaddresses for it are provided.
func RPCServiceID ¶
func RPCServiceID(rpcSvc interface{}) string
RPCServiceID returns the Service ID for the given RPCAPI object.
func SetFacilityLogLevel ¶
func SetFacilityLogLevel(f, l string)
SetFacilityLogLevel sets the log level for a given module.
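A small sketch of how the two logging maps above are typically applied before raising a single facility for troubleshooting. The "DEBUG" level name is assumed to be one accepted by the underlying go-log levels, alongside the "INFO"/"ERROR"/"FATAL" values shown above:

func configureLogging() {
	for f, l := range ipfscluster.LoggingFacilities {
		ipfscluster.SetFacilityLogLevel(f, l)
	}
	for f, l := range ipfscluster.LoggingFacilitiesExtra {
		ipfscluster.SetFacilityLogLevel(f, l)
	}
	// Raise one facility while debugging.
	ipfscluster.SetFacilityLogLevel("cluster", "DEBUG")
}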
Types ¶
type API ¶
type API interface {
	Component
}
API is a component which offers an API for Cluster. This is a base component.
type Cluster ¶
type Cluster struct {
// contains filtered or unexported fields
}
Cluster is the main IPFS Cluster component. It provides the Go API for the cluster and orchestrates the components that make up the system.
func NewCluster ¶
func NewCluster(
	ctx context.Context,
	host host.Host,
	dht *dual.DHT,
	cfg *Config,
	datastore ds.Datastore,
	consensus Consensus,
	apis []API,
	ipfs IPFSConnector,
	tracker PinTracker,
	monitor PeerMonitor,
	allocator PinAllocator,
	informers []Informer,
	tracer Tracer,
) (*Cluster, error)
NewCluster builds a new IPFS Cluster peer. It initializes a LibP2P host, creates an RPC server and client, and sets up all components.
The new cluster peer may still be performing initialization tasks when this call returns (consensus may still be bootstrapping). Use Cluster.Ready() if you need to wait until the peer is fully up.
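A condensed, hypothetical sketch of the wiring. The concrete component constructors live in the consensus, api, ipfsconn, pintracker, monitor, allocator, informer and observations subpackages and vary between versions, so they are hidden behind an assumed makeComponents helper here; only NewClusterHost, NewCluster, Ready, Shutdown and ReadyTimeout come from this package. cmd/rep-mgr-service shows the full wiring.

func startPeer(ctx context.Context, ident *config.Identity, cfg *ipfscluster.Config, store ds.Datastore) (*ipfscluster.Cluster, error) {
	host, psub, dht, err := ipfscluster.NewClusterHost(ctx, ident, cfg, store)
	if err != nil {
		return nil, err
	}

	// makeComponents is a hypothetical helper standing in for the
	// per-component constructors wired up in cmd/rep-mgr-service.
	consensus, apis, connector, tracker, monitor, allocator, informers, tracer, err :=
		makeComponents(ctx, host, psub, dht, cfg, store)
	if err != nil {
		return nil, err
	}

	c, err := ipfscluster.NewCluster(ctx, host, dht, cfg, store,
		consensus, apis, connector, tracker, monitor, allocator, informers, tracer)
	if err != nil {
		return nil, err
	}

	// Block until consensus has bootstrapped, or give up.
	select {
	case <-c.Ready():
		return c, nil
	case <-time.After(ipfscluster.ReadyTimeout):
		c.Shutdown(ctx)
		return nil, errors.New("peer did not become ready in time")
	}
}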
func (*Cluster) AddFile ¶
func (c *Cluster) AddFile(ctx context.Context, reader *multipart.Reader, params api.AddParams) (api.Cid, error)
AddFile adds a file to the ipfs daemons of the cluster. The ipfs importer pipeline is used to DAGify the file. Depending on input parameters this DAG can be added locally to the calling cluster peer's ipfs repo, or sharded across the entire cluster.
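A hedged sketch of feeding a local file to AddFile through a standard multipart form. The real clients build the multipart body with the go-ipfs-files helpers; the "file" field name and api.DefaultAddParams are assumptions here, and bytes, os, mime/multipart and path/filepath come from the standard library:

func addLocalFile(ctx context.Context, c *ipfscluster.Cluster, path string) (api.Cid, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return api.Cid{}, err
	}

	// Wrap the file in a single-part multipart form.
	var body bytes.Buffer
	w := multipart.NewWriter(&body)
	part, err := w.CreateFormFile("file", filepath.Base(path))
	if err != nil {
		return api.Cid{}, err
	}
	if _, err := part.Write(data); err != nil {
		return api.Cid{}, err
	}
	w.Close()

	reader := multipart.NewReader(&body, w.Boundary())
	params := api.DefaultAddParams() // assumed helper in the api package
	return c.AddFile(ctx, reader, params)
}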
func (*Cluster) Alerts ¶
Alerts returns the last alerts recorded by this cluster peer with the most recent first.
func (*Cluster) ConnectGraph ¶
func (c *Cluster) ConnectGraph() (api.ConnectGraph, error)
ConnectGraph returns a description of which cluster peers and ipfs daemons are connected to each other.
func (*Cluster) Done ¶
func (c *Cluster) Done() <-chan struct{}
Done provides a way to learn if the Peer has been shut down (for example, because it has been removed from the Cluster).
func (*Cluster) Join ¶
Join adds this peer to an existing cluster by bootstrapping to a given multiaddress. It works by calling PeerAdd on the destination cluster and making sure that the new peer is ready to discover and contact the rest.
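For example, with ma as the go-multiaddr package (the address below is a placeholder; it must include the bootstrap peer's /p2p/<peer ID> component):

func joinExisting(ctx context.Context, c *ipfscluster.Cluster) error {
	// Placeholder multiaddress of an existing cluster peer.
	addr, err := ma.NewMultiaddr("/ip4/192.0.2.10/tcp/9096/p2p/12D3KooWExamplePeerID")
	if err != nil {
		return err
	}
	return c.Join(ctx, addr)
}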
func (*Cluster) PeerAdd ¶
PeerAdd adds a new peer to this Cluster.
For it to work well, the new peer should be discoverable (part of our peerstore or connected to one of the existing peers) and reachable. Since PeerAdd allows adding peers which are not running or reachable, it is recommended to call Join() from the new peer instead.
The new peer ID will be passed to the consensus component to be added to the peerset.
func (*Cluster) PeerRemove ¶
PeerRemove removes a peer from this Cluster.
The peer will be removed from the consensus peerset. This may first trigger repinnings for all content if not disabled.
func (*Cluster) Peers ¶
Peers returns the IDs of the members of this Cluster on the out channel. This method blocks until it has finished.
func (*Cluster) Pin ¶
Pin makes the cluster Pin a Cid. This implies adding the Cid to the IPFS Cluster peers shared-state. Depending on the cluster pinning strategy, the PinTracker may then request the IPFS daemon to pin the Cid.
Pin returns the Pin as stored in the global state (with the given allocations) and an error if the operation could not be persisted. Pin does not reflect the success or failure of underlying IPFS daemon pinning operations, which happen asynchronously.
If the options UserAllocations are non-empty then these peers are pinned with priority over other peers in the cluster. If the max repl factor is less than the size of the specified peerset then peers are chosen from this set in allocation order. If the minimum repl factor is greater than the size of this set then the remaining peers are allocated in order from the rest of the cluster. Priority allocations are best effort. If any priority peers are unavailable then Pin will simply allocate from the rest of the cluster.
If the Update option is set, the pin options (including allocations) will be copied from an existing one. This is equivalent to running PinUpdate.
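A sketch of pinning with explicit options. api.DecodeCid and the PinOptions field names follow the upstream ipfs-cluster api package and are assumptions here; peer is the libp2p core peer package:

func pinWithOptions(ctx context.Context, c *ipfscluster.Cluster, cidStr string, priority []peer.ID) (api.Pin, error) {
	h, err := api.DecodeCid(cidStr)
	if err != nil {
		return api.Pin{}, err
	}
	opts := api.PinOptions{
		Name:                 "my-dataset",
		ReplicationFactorMin: 2,
		ReplicationFactorMax: 3,
		UserAllocations:      priority, // best-effort priority peers
	}
	return c.Pin(ctx, h, opts)
}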
func (*Cluster) PinGet ¶
PinGet returns information for a single Cid managed by Cluster. The information is obtained from the current global state. The returned api.Pin provides information about the allocations assigned for the requested Cid, but does not indicate if the item is successfully pinned. For that, use Status(). PinGet returns an error if the given Cid is not part of the global state.
func (*Cluster) PinPath ¶
PinPath pins a CID resolved from its IPFS path. It returns the resolved Pin object.
func (*Cluster) PinUpdate ¶
func (c *Cluster) PinUpdate(ctx context.Context, from api.Cid, to api.Cid, opts api.PinOptions) (api.Pin, error)
PinUpdate pins a new CID based on an existing cluster Pin. The allocations and most pin options (replication factors) are copied from the existing Pin. The options object can be used to set the Name for the new pin and might support additional options in the future.
The from pin is NOT unpinned upon completion. The new pin might take advantage of efficient pin/update operation on IPFS-side (if the IPFSConnector supports it - the default one does). This may offer significant speed when pinning items which are similar to previously pinned content.
func (*Cluster) Pins ¶
Pins sends pins on the given out channel as it iterates the full pinset (current global state). This is the source of truth as to which pins are managed and their allocation, but does not indicate if the item is successfully pinned. For that, use the Status*() methods.
The operation can be aborted by canceling the context. This method blocks until the operation has completed.
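A sketch of the consumption pattern: the caller owns the out channel and drains it from a goroutine. The out channel is assumed to be closed by the time Pins returns, as in upstream ipfs-cluster:

func collectPins(ctx context.Context, c *ipfscluster.Cluster) ([]api.Pin, error) {
	out := make(chan api.Pin, 64)
	var pins []api.Pin
	done := make(chan struct{})
	go func() {
		defer close(done)
		for p := range out { // ranges until out is closed
			pins = append(pins, p)
		}
	}()
	err := c.Pins(ctx, out)
	<-done
	return pins, err
}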
func (*Cluster) Ready ¶
func (c *Cluster) Ready() <-chan struct{}
Ready returns a channel which signals when this peer is fully initialized (including consensus).
func (*Cluster) Recover ¶
Recover triggers a recover operation for a given Cid in all cluster peers.
Recover operations ask IPFS to pin or unpin items in error state. Recover is faster than calling Pin on the same CID as it avoids committing an identical pin to the consensus layer.
func (*Cluster) RecoverAll ¶
RecoverAll triggers a RecoverAllLocal operation on all peers and returns GlobalPinInfo objects for all recovered items. This method blocks until finished. The operation can be aborted by canceling the context.
func (*Cluster) RecoverAllLocal ¶
RecoverAllLocal triggers a RecoverLocal operation for all Cids tracked by this peer.
Recover operations ask IPFS to pin or unpin items in error state. Recover is faster than calling Pin on the same CID as it avoids committing an identical pin to the consensus layer.
It returns the list of pins that were re-queued for pinning on the out channel. It blocks until done.
RecoverAllLocal is called automatically every PinRecoverInterval.
func (*Cluster) RecoverLocal ¶
RecoverLocal triggers a recover operation for a given Cid in this peer only. It returns the updated PinInfo, after recovery.
Recover operations ask IPFS to pin or unpin items in error state. Recover is faster than calling Pin on the same CID as it avoids committing an identical pin to the consensus layer.
func (*Cluster) RepoGCLocal ¶
RepoGCLocal performs garbage collection only on the local IPFS daemon.
func (*Cluster) Shutdown ¶
Shutdown performs all the necessary operations to shut down the IPFS Cluster peer:
- Saves the peerstore with the current peers.
- Removes itself from the consensus peerset when LeaveOnShutdown is set.
- Shuts down all the components.
- Collects all goroutines.
Shutdown does not close the libp2p host, the DHT, the datastore or generally anything that Cluster did not create.
func (*Cluster) StateSync ¶
StateSync performs maintenance tasks on the global state that require looping through all the items. It is triggered automatically on StateSyncInterval. Currently it:
- Sends unpin for expired items for which this peer is "closest" (skipped for follower peers)
func (*Cluster) Status ¶
Status returns the GlobalPinInfo for a given Cid as fetched from all current peers. If an error happens, the GlobalPinInfo should contain as much information as could be fetched from the other peers.
func (*Cluster) StatusAll ¶
func (c *Cluster) StatusAll(ctx context.Context, filter api.TrackerStatus, out chan<- api.GlobalPinInfo) error
StatusAll returns the GlobalPinInfo for all tracked Cids in all peers on the out channel. This is done by broadcasting StatusAll to all peers. If an error happens, it is returned. This method blocks until it finishes. The operation can be aborted by canceling the context.
func (*Cluster) StatusAllLocal ¶
func (c *Cluster) StatusAllLocal(ctx context.Context, filter api.TrackerStatus, out chan<- api.PinInfo) error
StatusAllLocal returns the PinInfo for all the tracked Cids in this peer on the out channel. It blocks until finished.
func (*Cluster) StatusLocal ¶
StatusLocal returns this peer's PinInfo for a given Cid.
func (*Cluster) Unpin ¶
Unpin removes a previously pinned Cid from Cluster. It returns the global state Pin object as it was stored before removal, or an error if it was not possible to update the global state.
Unpin does not reflect the success or failure of underlying IPFS daemon unpinning operations, which happen in async fashion.
type ClusterRPCAPI ¶
type ClusterRPCAPI struct {
// contains filtered or unexported fields
}
ClusterRPCAPI is a go-libp2p-gorpc service which provides the internal peer API for the main cluster component.
func (*ClusterRPCAPI) BlockAllocate ¶
BlockAllocate returns allocations for blocks. This is used in the adders. It's different from pin allocations when ReplicationFactor < 0.
func (*ClusterRPCAPI) ConnectGraph ¶
func (rpcapi *ClusterRPCAPI) ConnectGraph(ctx context.Context, in struct{}, out *api.ConnectGraph) error
ConnectGraph runs Cluster.GetConnectGraph().
func (*ClusterRPCAPI) GetKnodeList ¶
func (rpcapi *ClusterRPCAPI) GetKnodeList(ctx context.Context, args map[string]string, out *api.KNodeAddrs) error
GetKnodeList returns the list of upload nodes.
func (*ClusterRPCAPI) GetNodesForCid ¶
func (rpcapi *ClusterRPCAPI) GetNodesForCid(ctx context.Context, cids []api.Cid, out *map[api.Cid][]api.Node) error
GetNodesForCid returns the download nodes for the given CIDs.
func (*ClusterRPCAPI) IDStream ¶
func (rpcapi *ClusterRPCAPI) IDStream(ctx context.Context, in <-chan struct{}, out chan<- api.ID) error
IDStream runs Cluster.ID() but in streaming form.
func (*ClusterRPCAPI) PeerRemove ¶
PeerRemove runs Cluster.PeerRm().
func (*ClusterRPCAPI) Peers ¶
func (rpcapi *ClusterRPCAPI) Peers(ctx context.Context, in <-chan struct{}, out chan<- api.ID) error
Peers runs Cluster.Peers().
func (*ClusterRPCAPI) PeersWithFilter ¶
func (rpcapi *ClusterRPCAPI) PeersWithFilter(ctx context.Context, in <-chan []peer.ID, out chan<- api.ID) error
PeersWithFilter runs Cluster.peersWithFilter().
func (*ClusterRPCAPI) Pins ¶
func (rpcapi *ClusterRPCAPI) Pins(ctx context.Context, in <-chan struct{}, out chan<- api.Pin) error
Pins runs Cluster.Pins().
func (*ClusterRPCAPI) Recover ¶
func (rpcapi *ClusterRPCAPI) Recover(ctx context.Context, in api.Cid, out *api.GlobalPinInfo) error
Recover runs Cluster.Recover().
func (*ClusterRPCAPI) RecoverAll ¶
func (rpcapi *ClusterRPCAPI) RecoverAll(ctx context.Context, in <-chan struct{}, out chan<- api.GlobalPinInfo) error
RecoverAll runs Cluster.RecoverAll().
func (*ClusterRPCAPI) RecoverAllLocal ¶
func (rpcapi *ClusterRPCAPI) RecoverAllLocal(ctx context.Context, in <-chan struct{}, out chan<- api.PinInfo) error
RecoverAllLocal runs Cluster.RecoverAllLocal().
func (*ClusterRPCAPI) RecoverLocal ¶
RecoverLocal runs Cluster.RecoverLocal().
func (*ClusterRPCAPI) RepoGC ¶
func (rpcapi *ClusterRPCAPI) RepoGC(ctx context.Context, in struct{}, out *api.GlobalRepoGC) error
RepoGC performs garbage collection sweep on all peers' repos.
func (*ClusterRPCAPI) RepoGCLocal ¶
RepoGCLocal performs garbage collection sweep only on the local peer's IPFS daemon.
func (*ClusterRPCAPI) SendInformerMetrics ¶
func (rpcapi *ClusterRPCAPI) SendInformerMetrics(ctx context.Context, in struct{}, out *struct{}) error
SendInformerMetrics runs Cluster.sendInformerMetric().
func (*ClusterRPCAPI) SendInformersMetrics ¶
func (rpcapi *ClusterRPCAPI) SendInformersMetrics(ctx context.Context, in struct{}, out *struct{}) error
SendInformersMetrics runs Cluster.sendInformerMetric() on all informers.
func (*ClusterRPCAPI) Status ¶
func (rpcapi *ClusterRPCAPI) Status(ctx context.Context, in api.Cid, out *api.GlobalPinInfo) error
Status runs Cluster.Status().
func (*ClusterRPCAPI) StatusAll ¶
func (rpcapi *ClusterRPCAPI) StatusAll(ctx context.Context, in <-chan api.TrackerStatus, out chan<- api.GlobalPinInfo) error
StatusAll runs Cluster.StatusAll().
func (*ClusterRPCAPI) StatusAllLocal ¶
func (rpcapi *ClusterRPCAPI) StatusAllLocal(ctx context.Context, in <-chan api.TrackerStatus, out chan<- api.PinInfo) error
StatusAllLocal runs Cluster.StatusAllLocal().
func (*ClusterRPCAPI) StatusLocal ¶
StatusLocal runs Cluster.StatusLocal().
type Component ¶
Component represents a piece of ipfscluster. Cluster components usually run their own goroutines (an HTTP server, for example). They communicate with the main Cluster component and other components (both local and remote) using an instance of rpc.Client.
type Config ¶
type Config struct {
	config.Saver

	// User-defined peername for use as human-readable identifier.
	Peername string

	// Cluster secret for private network. Peers will be in the same cluster
	// if and only if they have the same ClusterSecret. The cluster secret
	// must be exactly 64 characters and contain only hexadecimal characters
	// (`[0-9a-f]`).
	Secret pnet.PSK

	// RPCPolicy defines access control to RPC endpoints.
	RPCPolicy map[string]RPCEndpointType

	// Leave Cluster on shutdown. Politely informs other peers
	// of the departure and removes itself from the consensus
	// peer set. The Cluster size will be reduced by one.
	LeaveOnShutdown bool

	// Listen parameters for the Cluster libp2p Host. Used by
	// the RPC and Consensus components.
	ListenAddr []ma.Multiaddr

	// Enables HOP relay for the node. If this is enabled, the node will act as
	// an intermediate (Hop Relay) node in relay circuits for connected peers.
	EnableRelayHop bool

	// ConnMgr holds configuration values for the connection manager for
	// the libp2p host.
	// FIXME: This only applies to rep-mgr-service.
	ConnMgr ConnMgrConfig

	// Sets the default dial timeout for libp2p connections to other
	// peers.
	DialPeerTimeout time.Duration

	// Time between syncs of the consensus state to the
	// tracker state. Normally states are synced anyway, but this helps
	// when new nodes are joining the cluster. Reduce for faster
	// consistency, increase with larger states.
	StateSyncInterval time.Duration

	// Time between automatic runs of the "recover" operation
	// which will retry to pin/unpin items in error state.
	PinRecoverInterval time.Duration

	// ReplicationFactorMax indicates the target number of nodes
	// that should pin content. For example, a replication_factor of
	// 3 will have cluster allocate each pinned hash to 3 peers if
	// possible.
	// See also ReplicationFactorMin. A ReplicationFactorMax of -1
	// will allocate to every available node.
	ReplicationFactorMax int

	// ReplicationFactorMin indicates the minimum number of healthy
	// nodes pinning content. If the number of nodes available to pin
	// is less than this threshold, an error will be returned.
	// In the case of peer health issues, content pinned will be
	// re-allocated if the threshold is crossed.
	// For example, a ReplicationFactorMin of 2 will allocate at least
	// two peers to hold content, and return an error if this is not
	// possible.
	ReplicationFactorMin int

	// MonitorPingInterval is the frequency with which a cluster peer
	// sends a "ping" metric. The metric has a TTL set to the double of
	// this value. This metric sends information about this peer to other
	// peers.
	MonitorPingInterval time.Duration

	// PeerWatchInterval is the frequency that we use to watch for changes
	// in the consensus peerset and save new peers to the configuration
	// file. This also affects how soon we realize that we have
	// been removed from a cluster.
	PeerWatchInterval time.Duration

	// MDNSInterval controls the time between mDNS broadcasts to the
	// network announcing the peer addresses. Set to 0 to disable
	// mDNS.
	MDNSInterval time.Duration

	// PinOnlyOnTrustedPeers limits allocations to trusted peers only.
	PinOnlyOnTrustedPeers bool

	// If true, DisableRepinning ensures that no repinning happens
	// when a node goes down.
	// This is useful when doing certain types of maintenance, or simply
	// when not wanting to rely on the monitoring system which needs a revamp.
	DisableRepinning bool

	// FollowerMode disables broadcast requests from this peer
	// (sync, recover, status) and disallows pinset management
	// operations (Pin/Unpin).
	FollowerMode bool

	// PeerstoreFile specifies the file on which we persist the
	// libp2p host peerstore addresses. This file is regularly saved.
	PeerstoreFile string

	// PeerAddresses stores additional addresses for peers that may or may
	// not be in the peerstore file. These are considered high priority
	// when bootstrapping the initial cluster connections.
	PeerAddresses []ma.Multiaddr

	// Tracing flag used to skip tracing specific paths when not enabled.
	Tracing bool
}
Config is the configuration object containing customizable variables to initialize the main rep-mgr component. It implements the config.ComponentConfig interface.
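A minimal sketch of building a Config programmatically with the methods listed below (Default, ApplyEnvVars, Validate) rather than loading JSON; field names are those shown in the struct above:

func newConfig(hexSecret string) (*ipfscluster.Config, error) {
	cfg := &ipfscluster.Config{}
	if err := cfg.Default(); err != nil {
		return nil, err
	}
	// Let environment variables override the defaults.
	if err := cfg.ApplyEnvVars(); err != nil {
		return nil, err
	}
	secret, err := ipfscluster.DecodeClusterSecret(hexSecret)
	if err != nil {
		return nil, err
	}
	cfg.Secret = secret
	cfg.Peername = "example-peer"
	cfg.LeaveOnShutdown = true
	return cfg, cfg.Validate()
}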
func (*Config) ApplyEnvVars ¶
ApplyEnvVars fills in any Config fields found as environment variables.
func (*Config) Default ¶
Default fills in all the Config fields with default working values. This means it will generate a Secret.
func (*Config) GetPeerstorePath ¶
GetPeerstorePath returns the full path of the PeerstoreFile, obtained by concatenating that value with BaseDir of the configuration, if set. An empty string is returned when BaseDir is not set.
func (*Config) LoadJSON ¶
LoadJSON receives a raw json-formatted configuration and sets the Config fields from it. Note that it should be JSON as generated by ToJSON().
func (*Config) ToDisplayJSON ¶
ToDisplayJSON returns JSON config as a string.
type ConnMgrConfig ¶
ConnMgrConfig configures the libp2p host connection manager.
type Consensus ¶
type Consensus interface {
	Component

	// Returns a channel to signal that the consensus layer is ready
	// allowing the main component to wait for it during start.
	Ready(context.Context) <-chan struct{}

	// Logs a pin operation.
	LogPin(context.Context, api.Pin) error

	// Logs an unpin operation.
	LogUnpin(context.Context, api.Pin) error

	AddPeer(context.Context, peer.ID) error
	RmPeer(context.Context, peer.ID) error
	State(context.Context) (state.ReadOnly, error)

	// Provide a node which is responsible to perform
	// specific tasks which must only run in 1 cluster peer.
	Leader(context.Context) (peer.ID, error)

	// Only returns when the consensus state has all log
	// updates applied to it.
	WaitForSync(context.Context) error

	// Clean removes all consensus data.
	Clean(context.Context) error

	// Peers returns the peerset participating in the Consensus.
	Peers(context.Context) ([]peer.ID, error)

	// IsTrustedPeer returns true if the given peer is "trusted".
	// This will grant access to more RPC endpoints than a
	// non-trusted one. This should be fast as it will be
	// called repeatedly for every remote RPC request.
	IsTrustedPeer(context.Context, peer.ID) bool

	// Trust marks a peer as "trusted".
	Trust(context.Context, peer.ID) error

	// Distrust removes a peer from the "trusted" set.
	Distrust(context.Context, peer.ID) error
}
Consensus is a component which keeps a shared state in IPFS Cluster and triggers actions on updates to that state. Currently, Consensus needs to be able to elect/provide a Cluster Leader, and the implementation is tightly coupled to the Cluster main component.
type ConsensusRPCAPI ¶
type ConsensusRPCAPI struct {
// contains filtered or unexported fields
}
ConsensusRPCAPI is a go-libp2p-gorpc service which provides the internal peer API for the Consensus component.
type IPFSConnector ¶
type IPFSConnector interface {
	Component

	// Ready provides a channel to notify when IPFS is ready. It allows the
	// main cluster component to wait for IPFS to be in working state
	// before starting full-fledged operations.
	Ready(context.Context) <-chan struct{}

	ID(context.Context) (api.IPFSID, error)
	Pin(context.Context, api.Pin) error
	Unpin(context.Context, api.Pin) error
	PinLsCid(context.Context, api.Pin) (api.PinDetail, error)

	// PinLs returns pins in the pinset of the given types (recursive, direct...).
	PinLs(ctx context.Context, typeFilters []string, out chan<- api.IPFSPinInfo) error

	// ConnectSwarms makes sure this peer's IPFS daemon is connected to
	// other peers' IPFS daemons.
	ConnectSwarms(context.Context) error

	// SwarmPeers returns the IPFS daemon's swarm peers.
	SwarmPeers(context.Context) ([]peer.ID, error)

	// ConfigKey returns the value for a configuration key.
	// Subobjects are reached with keypaths as "Parent/Child/GrandChild...".
	ConfigKey(keypath string) (interface{}, error)

	// RepoStat returns the current repository size and max limit as
	// provided by "repo stat".
	RepoStat(context.Context) (api.IPFSRepoStat, error)

	// RepoGC performs a garbage collection sweep on the IPFS repo.
	RepoGC(context.Context) (api.RepoGC, error)

	// Resolve returns a CID given a path.
	Resolve(context.Context, string) (api.Cid, error)

	// BlockStream adds a stream of blocks to IPFS.
	BlockStream(context.Context, <-chan api.NodeWithMeta) error

	// BlockGet retrieves the raw data of an IPFS block.
	BlockGet(context.Context, api.Cid) ([]byte, error)

	// Alerts returns a channel of alerts.
	Alerts() <-chan api.Alert
}
IPFSConnector is a component which allows cluster to interact with an IPFS daemon. This is a base component.
type IPFSConnectorRPCAPI ¶
type IPFSConnectorRPCAPI struct {
// contains filtered or unexported fields
}
IPFSConnectorRPCAPI is a go-libp2p-gorpc service which provides the internal peer API for the IPFSConnector component.
func (*IPFSConnectorRPCAPI) BlockStream ¶
func (rpcapi *IPFSConnectorRPCAPI) BlockStream(ctx context.Context, in <-chan api.NodeWithMeta, out chan<- struct{}) error
BlockStream runs IPFSConnector.BlockStream().
func (*IPFSConnectorRPCAPI) ConfigKey ¶
func (rpcapi *IPFSConnectorRPCAPI) ConfigKey(ctx context.Context, in string, out *interface{}) error
ConfigKey runs IPFSConnector.ConfigKey().
func (*IPFSConnectorRPCAPI) PinLs ¶
func (rpcapi *IPFSConnectorRPCAPI) PinLs(ctx context.Context, in <-chan []string, out chan<- api.IPFSPinInfo) error
PinLs runs IPFSConnector.PinLs().
func (*IPFSConnectorRPCAPI) PinLsCid ¶
func (rpcapi *IPFSConnectorRPCAPI) PinLsCid(ctx context.Context, in api.Pin, out *api.PinDetail) error
PinLsCid runs IPFSConnector.PinLsCid().
func (*IPFSConnectorRPCAPI) RepoStat ¶
func (rpcapi *IPFSConnectorRPCAPI) RepoStat(ctx context.Context, in struct{}, out *api.IPFSRepoStat) error
RepoStat runs IPFSConnector.RepoStat().
func (*IPFSConnectorRPCAPI) SwarmPeers ¶
func (rpcapi *IPFSConnectorRPCAPI) SwarmPeers(ctx context.Context, in struct{}, out *[]peer.ID) error
SwarmPeers runs IPFSConnector.SwarmPeers().
type Informer ¶
type Informer interface {
	Component

	Name() string

	// GetMetrics returns the metrics obtained by this Informer. It must
	// always return at least one metric.
	GetMetrics(context.Context) []api.Metric
}
Informer provides Metric information from a peer. The metrics produced by informers are then passed to a PinAllocator which will use them to determine where to pin content. The metric is agnostic to the rest of Cluster.
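A hypothetical custom Informer that publishes a constant metric. The api.Metric fields (Name, Value, Valid) and SetTTL, the Component methods (SetClient, Shutdown), and the go-libp2p-gorpc rpc import follow upstream ipfs-cluster and are assumptions here; real informers (informer/disk, informer/numpin, informer/tags) measure something useful instead.

type constInformer struct{}

func (ci *constInformer) SetClient(_ *rpc.Client)          {}
func (ci *constInformer) Shutdown(_ context.Context) error { return nil }
func (ci *constInformer) Name() string                     { return "constant" }

func (ci *constInformer) GetMetrics(ctx context.Context) []api.Metric {
	m := api.Metric{
		Name:  "constant",
		Value: "1",
		Valid: true,
	}
	m.SetTTL(30 * time.Second) // metrics must carry an expiry
	return []api.Metric{m}
}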
type PeerMonitor ¶
type PeerMonitor interface {
	Component

	// LogMetric stores a metric. It can be used to manually inject
	// a metric to a monitor.
	LogMetric(context.Context, api.Metric) error

	// PublishMetric sends a metric to the rest of the peers.
	// How to send it, and to who, is to be decided by the implementation.
	PublishMetric(context.Context, api.Metric) error

	// LatestMetrics returns a map with the latest valid metrics of matching
	// name for the current cluster peers. The result should only contain
	// one metric per peer at most.
	LatestMetrics(ctx context.Context, name string) []api.Metric

	// Returns the latest metric received from a peer. It may be expired.
	LatestForPeer(ctx context.Context, name string, pid peer.ID) api.Metric

	// MetricNames returns a list of metric names.
	MetricNames(ctx context.Context) []string

	// Alerts delivers alerts generated when this peer monitor detects
	// a problem (i.e. metrics not arriving as expected). Alerts can be used
	// to trigger self-healing measures or re-pinnings of content.
	Alerts() <-chan api.Alert
}
PeerMonitor is a component in charge of publishing a peer's metrics and reading metrics from other peers in the cluster. The PinAllocator will use the metrics provided by the monitor as candidates for Pin allocations.
The PeerMonitor component also provides an Alert channel which is signaled when a metric is no longer received and the monitor identifies it as a problem.
type PeerMonitorRPCAPI ¶
type PeerMonitorRPCAPI struct {
// contains filtered or unexported fields
}
PeerMonitorRPCAPI is a go-libp2p-gorpc service which provides the internal peer API for the PeerMonitor component.
func (*PeerMonitorRPCAPI) LatestMetrics ¶
func (rpcapi *PeerMonitorRPCAPI) LatestMetrics(ctx context.Context, in string, out *[]api.Metric) error
LatestMetrics runs PeerMonitor.LatestMetrics().
func (*PeerMonitorRPCAPI) MetricNames ¶
func (rpcapi *PeerMonitorRPCAPI) MetricNames(ctx context.Context, in struct{}, out *[]string) error
MetricNames runs PeerMonitor.MetricNames().
type Peered ¶
type Peered interface {
	AddPeer(ctx context.Context, p peer.ID)
	RmPeer(ctx context.Context, p peer.ID)
}
Peered represents a component which needs to be aware of the peers in the Cluster and of any changes to the peer set.
type PinAllocator ¶
type PinAllocator interface {
	Component

	// Allocate returns the list of peers that should be assigned to
	// Pin content in order of preference (from the most preferred to the
	// least). The "current" map contains valid metrics for peers
	// which are currently pinning the content. The candidates map
	// contains the metrics for all peers which are eligible for pinning
	// the content.
	Allocate(ctx context.Context, c api.Cid, current, candidates, priority api.MetricsSet) ([]peer.ID, error)

	// Metrics returns the list of metrics that the allocator needs.
	Metrics() []string
}
PinAllocator decides where to pin certain content. In order to make such decision, it receives the pin arguments, the peers which are currently allocated to the content and metrics available for all peers which could allocate the content.
type PinTracker ¶
type PinTracker interface {
	Component

	// Track tells the tracker that a Cid is now under its supervision.
	// The tracker may decide to perform an IPFS pin.
	Track(context.Context, api.Pin) error

	// Untrack tells the tracker that a Cid is to be forgotten. The tracker
	// may perform an IPFS unpin operation.
	Untrack(context.Context, api.Cid) error

	// StatusAll returns the list of pins with their local status. Takes a
	// filter to specify which statuses to report.
	StatusAll(context.Context, api.TrackerStatus, chan<- api.PinInfo) error

	// Status returns the local status of a given Cid.
	Status(context.Context, api.Cid) api.PinInfo

	// RecoverAll calls Recover() for all pins tracked.
	RecoverAll(context.Context, chan<- api.PinInfo) error

	// Recover retriggers a Pin/Unpin operation in Cids with error status.
	Recover(context.Context, api.Cid) (api.PinInfo, error)

	// PinQueueSize returns the current size of the pinning queue.
	PinQueueSize(context.Context) (int64, error)

	// GetPinFromState gets a pin from the state.
	GetPinFromState(ctx context.Context, cid api.Cid) api.Pin
}
PinTracker represents a component which tracks the status of the pins in this cluster and ensures they are in sync with the IPFS daemon. This component should be thread safe.
type PinTrackerRPCAPI ¶
type PinTrackerRPCAPI struct {
// contains filtered or unexported fields
}
PinTrackerRPCAPI is a go-libp2p-gorpc service which provides the internal peer API for the PinTracker component.
func (*PinTrackerRPCAPI) PinQueueSize ¶
func (rpcapi *PinTrackerRPCAPI) PinQueueSize(ctx context.Context, in struct{}, out *int64) error
PinQueueSize runs PinTracker.PinQueueSize().
func (*PinTrackerRPCAPI) RecoverAll ¶
func (rpcapi *PinTrackerRPCAPI) RecoverAll(ctx context.Context, in <-chan struct{}, out chan<- api.PinInfo) error
RecoverAll runs PinTracker.RecoverAll().
func (*PinTrackerRPCAPI) StatusAll ¶
func (rpcapi *PinTrackerRPCAPI) StatusAll(ctx context.Context, in <-chan api.TrackerStatus, out chan<- api.PinInfo) error
StatusAll runs PinTracker.StatusAll().
type RPCEndpointType ¶
type RPCEndpointType int
RPCEndpointType controls how access is granted to an RPC endpoint.
const (
	// RPCClosed endpoints can only be called by the local cluster peer
	// on itself.
	RPCClosed RPCEndpointType = iota
	// RPCTrusted endpoints can be called by "trusted" peers.
	// It depends which peers are considered trusted. For example,
	// in "raft" mode, Cluster considers all peers "trusted". In "crdt" mode,
	// trusted peers are those specified in the configuration.
	RPCTrusted
	// RPCOpen endpoints can be called by any peer in the Cluster swarm.
	RPCOpen
)
RPC endpoint types w.r.t. trust level
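A sketch of tightening the policy before starting a peer: copy DefaultRPCPolicy and downgrade an endpoint (here PeerAdd, which defaults to RPCOpen) to trusted-only, then assign the result to Config.RPCPolicy.

func restrictedPolicy() map[string]ipfscluster.RPCEndpointType {
	policy := make(map[string]ipfscluster.RPCEndpointType, len(ipfscluster.DefaultRPCPolicy))
	for k, v := range ipfscluster.DefaultRPCPolicy {
		policy[k] = v // copy so the shared default map is not mutated
	}
	policy["Cluster.PeerAdd"] = ipfscluster.RPCTrusted
	return policy // assign to Config.RPCPolicy before NewCluster
}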
Source Files ¶
Directories ¶
Path | Synopsis
--- | ---
adder | Package adder implements functionality to add content to IPFS daemons managed by the Cluster.
adderutils | Package adderutils provides some utilities for adding content to cluster.
ipfsadd | Package ipfsadd is a simplified copy of go-ipfs/core/coreunix/add.go.
sharding | Package sharding implements a sharding ClusterDAGService which places content in different shards while it is being added, creating a final Cluster DAG and pinning it.
single | Package single implements a ClusterDAGService that chunks and adds content to cluster without sharding, before pinning it.
allocator |
balanced | Package balanced implements an allocator that can sort allocations based on multiple metrics, where metrics may be an arbitrary way to partition a set of peers.
api | Package api holds declarations for types used in rep-mgr APIs to make them re-usable across different tools.
common | Package common implements all the things that an IPFS Cluster API component must do, except the actual routes that it handles.
common/test | Package test provides utility methods to test APIs based on the common API.
ipfsproxy | Package ipfsproxy implements the Cluster API interface by providing an IPFS HTTP interface as exposed by the go-ipfs daemon.
pb | Package pb provides protobuf definitions for serialized types in Cluster.
pinsvcapi | Package pinsvcapi implements an IPFS Cluster API component which provides an IPFS Pinning Services API to the cluster.
pinsvcapi/pinsvc | Package pinsvc contains type definitions for the Pinning Services API.
rest | Package rest implements an IPFS Cluster API component.
rest/client | Package client provides a Go Client for the IPFS Cluster API provided by the "api/rest" component.
cmd |
rep-mgr-ctl | The rep-mgr-ctl application.
rep-mgr-follow | The rep-mgr-follow application.
rep-mgr-service | The rep-mgr-service application.
cmdutils | Package cmdutils contains utilities to facilitate building of command line applications launching cluster peers.
config | Package config provides interfaces and utilities for different Cluster components to register, read, write and validate configuration sections stored in a central configuration file.
consensus |
crdt | Package crdt implements the IPFS Cluster consensus interface using CRDT-datastore to replicate the cluster global state to every peer.
raft | Package raft implements a Consensus component for IPFS Cluster which uses Raft (go-libp2p-raft).
datastore |
badger | Package badger provides a configurable BadgerDB go-datastore for use with IPFS Cluster.
inmem | Package inmem provides an in-memory thread-safe datastore for use with Cluster.
leveldb | Package leveldb provides a configurable LevelDB go-datastore for use with IPFS Cluster.
informer |
disk | Package disk implements a rep-mgr informer which can provide different disk-related metrics from the IPFS daemon as an api.Metric.
numpin | Package numpin implements a rep-mgr informer which determines how many items this peer is pinning and returns it as an api.Metric.
pinqueue | Package pinqueue implements a rep-mgr informer which issues the current size of the pinning queue.
tags | Package tags implements a rep-mgr informer which publishes user-defined tags as metrics.
ipfsconn |
ipfshttp | Package ipfshttp implements an IPFS Cluster IPFSConnector component.
monitor |
metrics | Package metrics provides common functionality for working with metrics, particularly useful for monitoring components.
pubsubmon | Package pubsubmon implements a PeerMonitor component for IPFS Cluster that uses PubSub to send and receive metrics.
observations | Package observations sets up metric and trace exporting for IPFS Cluster.
pintracker |
optracker | Package optracker implements functionality to track the status of pin and unpin operations as needed by implementations of the pintracker component.
stateless | Package stateless implements a PinTracker component for IPFS Cluster, which aims to reduce the memory footprint when handling really large cluster states.
pstoremgr | Package pstoremgr provides a Manager that simplifies handling addition, listing and removal of cluster peer multiaddresses from the libp2p Host.
rpcutil | Package rpcutil provides utility methods to perform go-libp2p-gorpc calls, particularly gorpc.MultiCall().
state | Package state holds the interface that any state implementation for IPFS Cluster must satisfy.
dsstate | Package dsstate implements the IPFS Cluster state interface using an underlying go-datastore.
test | Package test offers testing utilities for all the IPFS Cluster codebase, like IPFS daemon and RPC mocks and pre-defined testing CIDs.
version | Package version stores version information for IPFS Cluster.