Documentation ¶
Overview ¶
Package commands implements the ipfs command interface, using github.com/ipfs/kubo/commands to define the command-line and HTTP APIs. This is the interface available to those using IPFS from outside of the Go language.
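For orientation, here is a minimal sketch of how a command in this package is declared with go-ipfs-cmds. The "hello" command below is illustrative only and is not part of the package; it assumes the usual cmds import (github.com/ipfs/go-ipfs-cmds).

// Illustrative only: a minimal command in the style used throughout this package.
var helloCmd = &cmds.Command{
    Helptext: cmds.HelpText{
        Tagline: "Print a greeting.",
    },
    Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
        // Emit a single value to the caller (CLI or HTTP RPC).
        return cmds.EmitOnce(res, "hello from kubo")
    },
}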
Index ¶
- Constants
- Variables
- func CommandsCmd(root *cmds.Command) *cmds.Command
- func CompletionCmd(root *cmds.Command) *cmds.Command
- func CreateCmdExtras(opts ...func(e *cmds.Extra)) *cmds.Extra
- func DaemonNotRunning(req *cmds.Request, env cmds.Environment) error
- func ExternalBinary(instructions string) *cmds.Command
- func GetDoesNotUseConfigAsInput(e *cmds.Extra) (val bool, found bool)
- func GetDoesNotUseRepo(e *cmds.Extra) (val bool, found bool)
- func GetPreemptsAutoUpdate(e *cmds.Extra) (val bool, found bool)
- func ParsePeerParam(text string) (ma.Multiaddr, peer.ID, error)
- func SetDoesNotUseConfigAsInput(val bool) func(e *cmds.Extra)
- func SetDoesNotUseRepo(val bool) func(e *cmds.Extra)
- func SetPreemptsAutoUpdate(val bool) func(e *cmds.Extra)
- type AddEvent
- type BlockStat
- type BootstrapOutput
- type CidFormatRes
- type CodeAndName
- type Command
- type ConfigField
- type ConfigUpdateOutput
- type Dependency
- type GcResult
- type IdOutput
- type KeyList
- type KeyOutput
- type KeyOutputList
- type KeyRenameOutput
- type KeySignOutput
- type KeyVerifyOutput
- type LsLink
- type LsObject
- type LsOutput
- type MessageOutput
- type Option
- type P2PListenerInfoOutput
- type P2PLsOutput
- type P2PStreamInfoOutput
- type P2PStreamsOutput
- type PingResult
- type RefWrapper
- type RefWriter
- type RepoVersion
- type TimeParts
- type VerifyProgress
- type VersionCheckOutput
Constants ¶
const (
    RepoDirOption    = "repo-dir"
    ConfigFileOption = "config-file"
    ConfigOption     = "config"
    DebugOption      = "debug"
    LocalOption      = "local" // DEPRECATED: use OfflineOption
    OfflineOption    = "offline"
    ApiOption        = "api"      //nolint
    ApiAuthOption    = "api-auth" //nolint
)
const DefaultMinimalVersionFraction = 0.05 // 5%
const P2PProtoPrefix = "/x/"
P2PProtoPrefix is the default required prefix for protocol names.
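A hedged sketch of how these constants are typically consumed: the global option names are plain string keys into a request's options (as IDCmd does with OfflineOption), and P2PProtoPrefix is the prefix expected on 'ipfs p2p' protocol names. The helpers below are illustrative and not part of the package.

// Illustrative helper: global options are read back from req.Options by name.
func isOffline(req *cmds.Request) bool {
    offline, _ := req.Options[OfflineOption].(bool)
    return offline
}

// Illustrative helper: ensure a protocol name carries the required "/x/" prefix.
func withP2PPrefix(name string) string {
    if strings.HasPrefix(name, P2PProtoPrefix) {
        return name
    }
    return P2PProtoPrefix + strings.TrimPrefix(name, "/")
}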
Variables ¶
var (
    ErrNotOnline       = errors.New("this command must be run in online mode. Try running 'ipfs daemon' first")
    ErrSelfUnsupported = errors.New("finding your own node in the DHT is currently not supported")
)
var ActiveReqsCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "List commands run on this IPFS node.", ShortDescription: ` Lists running and recently run commands. `, }, NoLocal: true, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { ctx := env.(*oldcmds.Context) return cmds.EmitOnce(res, ctx.ReqLog.Report()) }, Options: []cmds.Option{ cmds.BoolOption(verboseOptionName, "v", "Print extra information."), }, Subcommands: map[string]*cmds.Command{ "clear": clearInactiveCmd, "set-time": setRequestClearCmd, }, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *[]*cmds.ReqLogEntry) error { verbose, _ := req.Options[verboseOptionName].(bool) tw := tabwriter.NewWriter(w, 4, 4, 2, ' ', 0) if verbose { fmt.Fprint(tw, "ID\t") } fmt.Fprint(tw, "Command\t") if verbose { fmt.Fprint(tw, "Arguments\tOptions\t") } fmt.Fprintln(tw, "Active\tStartTime\tRunTime") for _, req := range *out { if verbose { fmt.Fprintf(tw, "%d\t", req.ID) } fmt.Fprintf(tw, "%s\t", req.Command) if verbose { fmt.Fprintf(tw, "%v\t[", req.Args) var keys []string for k := range req.Options { keys = append(keys, k) } sort.Strings(keys) for _, k := range keys { fmt.Fprintf(tw, "%s=%v,", k, req.Options[k]) } fmt.Fprintf(tw, "]\t") } var live time.Duration if req.Active { live = time.Since(req.StartTime) } else { live = req.EndTime.Sub(req.StartTime) } t := req.StartTime.Format(time.Stamp) fmt.Fprintf(tw, "%t\t%s\t%s\n", req.Active, t, live) } tw.Flush() return nil }), }, Type: []*cmds.ReqLogEntry{}, }
var AddCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Add a file or directory to IPFS.", ShortDescription: ` Adds the content of <path> to IPFS. Use -r to add directories (recursively). `, LongDescription: ` Adds the content of <path> to IPFS. Use -r to add directories. Note that directories are added recursively, to form the IPFS MerkleDAG. If the daemon is not running, it will just add locally. If the daemon is started later, it will be advertised after a few seconds when the reprovider runs. The wrap option, '-w', wraps the file (or files, if using the recursive option) in a directory. This directory contains only the files which have been added, and means that the file retains its filename. For example: > ipfs add example.jpg added QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH example.jpg > ipfs add example.jpg -w added QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH example.jpg added QmaG4FuMqEBnQNn3C8XJ5bpW8kLs7zq2ZXgHptJHbKDDVx You can now refer to the added file in a gateway, like so: /ipfs/QmaG4FuMqEBnQNn3C8XJ5bpW8kLs7zq2ZXgHptJHbKDDVx/example.jpg Files imported with 'ipfs add' are protected from GC (implicit '--pin=true'), but it is up to you to remember the returned CID to get the data back later. Passing '--to-files' creates a reference in Files API (MFS), making it easier to find it in the future: > ipfs files mkdir -p /myfs/dir > ipfs add example.jpg --to-files /myfs/dir/ > ipfs files ls /myfs/dir/ example.jpg See 'ipfs files --help' to learn more about using MFS for keeping track of added files and directories. The chunker option, '-s', specifies the chunking strategy that dictates how to break files into blocks. Blocks with same content can be deduplicated. Different chunking strategies will produce different hashes for the same file. The default is a fixed block size of 256 * 1024 bytes, 'size-262144'. Alternatively, you can use the Buzhash or Rabin fingerprint chunker for content defined chunking by specifying buzhash or rabin-[min]-[avg]-[max] (where min/avg/max refer to the desired chunk sizes in bytes), e.g. 'rabin-262144-524288-1048576'. The following examples use very small byte sizes to demonstrate the properties of the different chunkers on a small file. You'll likely want to use a 1024 times larger chunk sizes for most files. > ipfs add --chunker=size-2048 ipfs-logo.svg added QmafrLBfzRLV4XSH1XcaMMeaXEUhDJjmtDfsYU95TrWG87 ipfs-logo.svg > ipfs add --chunker=rabin-512-1024-2048 ipfs-logo.svg added Qmf1hDN65tR55Ubh2RN1FPxr69xq3giVBz1KApsresY8Gn ipfs-logo.svg You can now check what blocks have been created by: > ipfs object links QmafrLBfzRLV4XSH1XcaMMeaXEUhDJjmtDfsYU95TrWG87 QmY6yj1GsermExDXoosVE3aSPxdMNYr6aKuw3nA8LoWPRS 2059 Qmf7ZQeSxq2fJVJbCmgTrLLVN9tDR9Wy5k75DxQKuz5Gyt 1195 > ipfs object links Qmf1hDN65tR55Ubh2RN1FPxr69xq3giVBz1KApsresY8Gn QmY6yj1GsermExDXoosVE3aSPxdMNYr6aKuw3nA8LoWPRS 2059 QmerURi9k4XzKCaaPbsK6BL5pMEjF7PGphjDvkkjDtsVf3 868 QmQB28iwSriSUSMqG2nXDTLtdPHgWb4rebBrU7Q1j4vxPv 338 Finally, a note on hash (CID) determinism and 'ipfs add' command. Almost all the flags provided by this command will change the final CID, and new flags may be added in the future. It is not guaranteed for the implicit defaults of 'ipfs add' to remain the same in future Kubo releases, or for other IPFS software to use the same import parameters as Kubo. If you need to back up or transport content-addressed data using a non-IPFS medium, CID can be preserved with CAR files. See 'dag export' and 'dag import' for more information. 
`, }, Arguments: []cmds.Argument{ cmds.FileArg("path", true, true, "The path to a file to be added to IPFS.").EnableRecursive().EnableStdin(), }, Options: []cmds.Option{ cmds.OptionRecursivePath, cmds.OptionDerefArgs, cmds.OptionStdinName, cmds.OptionHidden, cmds.OptionIgnore, cmds.OptionIgnoreRules, cmds.BoolOption(quietOptionName, "q", "Write minimal output."), cmds.BoolOption(quieterOptionName, "Q", "Write only final hash."), cmds.BoolOption(silentOptionName, "Write no output."), cmds.BoolOption(progressOptionName, "p", "Stream progress data."), cmds.BoolOption(trickleOptionName, "t", "Use trickle-dag format for dag generation."), cmds.BoolOption(onlyHashOptionName, "n", "Only chunk and hash - do not write to disk."), cmds.BoolOption(wrapOptionName, "w", "Wrap files with a directory object."), cmds.StringOption(chunkerOptionName, "s", "Chunking algorithm, size-[bytes], rabin-[min]-[avg]-[max] or buzhash"), cmds.BoolOption(rawLeavesOptionName, "Use raw blocks for leaf nodes."), cmds.BoolOption(noCopyOptionName, "Add the file using filestore. Implies raw-leaves. (experimental)"), cmds.BoolOption(fstoreCacheOptionName, "Check the filestore for pre-existing blocks. (experimental)"), cmds.IntOption(cidVersionOptionName, "CID version. Defaults to 0 unless an option that depends on CIDv1 is passed. Passing version 1 will cause the raw-leaves option to default to true."), cmds.StringOption(hashOptionName, "Hash function to use. Implies CIDv1 if not sha2-256. (experimental)"), cmds.BoolOption(inlineOptionName, "Inline small blocks into CIDs. (experimental)"), cmds.IntOption(inlineLimitOptionName, "Maximum block size to inline. (experimental)").WithDefault(32), cmds.BoolOption(pinOptionName, "Pin locally to protect added files from garbage collection.").WithDefault(true), cmds.StringOption(toFilesOptionName, "Add reference to Files API (MFS) at the provided path."), cmds.BoolOption(preserveModeOptionName, "Apply existing POSIX permissions to created UnixFS entries. Disables raw-leaves. (experimental)"), cmds.BoolOption(preserveMtimeOptionName, "Apply existing POSIX modification time to created UnixFS entries. Disables raw-leaves. (experimental)"), cmds.UintOption(modeOptionName, "Custom POSIX file mode to store in created UnixFS entries. Disables raw-leaves. (experimental)"), cmds.Int64Option(mtimeOptionName, "Custom POSIX modification time to store in created UnixFS entries (seconds before or after the Unix Epoch). Disables raw-leaves. 
(experimental)"), cmds.UintOption(mtimeNsecsOptionName, "Custom POSIX modification time (optional time fraction in nanoseconds)"), }, PreRun: func(req *cmds.Request, env cmds.Environment) error { quiet, _ := req.Options[quietOptionName].(bool) quieter, _ := req.Options[quieterOptionName].(bool) quiet = quiet || quieter silent, _ := req.Options[silentOptionName].(bool) if !quiet && !silent { _, found := req.Options[progressOptionName].(bool) if !found { req.Options[progressOptionName] = true } } return nil }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { api, err := cmdenv.GetApi(env, req) if err != nil { return err } nd, err := cmdenv.GetNode(env) if err != nil { return err } cfg, err := nd.Repo.Config() if err != nil { return err } progress, _ := req.Options[progressOptionName].(bool) trickle, _ := req.Options[trickleOptionName].(bool) wrap, _ := req.Options[wrapOptionName].(bool) onlyHash, _ := req.Options[onlyHashOptionName].(bool) silent, _ := req.Options[silentOptionName].(bool) chunker, _ := req.Options[chunkerOptionName].(string) dopin, _ := req.Options[pinOptionName].(bool) rawblks, rbset := req.Options[rawLeavesOptionName].(bool) nocopy, _ := req.Options[noCopyOptionName].(bool) fscache, _ := req.Options[fstoreCacheOptionName].(bool) cidVer, cidVerSet := req.Options[cidVersionOptionName].(int) hashFunStr, _ := req.Options[hashOptionName].(string) inline, _ := req.Options[inlineOptionName].(bool) inlineLimit, _ := req.Options[inlineLimitOptionName].(int) toFilesStr, toFilesSet := req.Options[toFilesOptionName].(string) preserveMode, _ := req.Options[preserveModeOptionName].(bool) preserveMtime, _ := req.Options[preserveMtimeOptionName].(bool) mode, _ := req.Options[modeOptionName].(uint) mtime, _ := req.Options[mtimeOptionName].(int64) mtimeNsecs, _ := req.Options[mtimeNsecsOptionName].(uint) if chunker == "" { chunker = cfg.Import.UnixFSChunker.WithDefault(config.DefaultUnixFSChunker) } if hashFunStr == "" { hashFunStr = cfg.Import.HashFunction.WithDefault(config.DefaultHashFunction) } if !cidVerSet && !cfg.Import.CidVersion.IsDefault() { cidVerSet = true cidVer = int(cfg.Import.CidVersion.WithDefault(config.DefaultCidVersion)) } if !rbset && cfg.Import.UnixFSRawLeaves != config.Default { rbset = true rawblks = cfg.Import.UnixFSRawLeaves.WithDefault(config.DefaultUnixFSRawLeaves) } if preserveMode || preserveMtime || mode != 0 || mtime != 0 { if rbset && rawblks { return fmt.Errorf("%s can't be used with UnixFS metadata like mode or modification time", rawLeavesOptionName) } rbset = true rawblks = false } if onlyHash && toFilesSet { return fmt.Errorf("%s and %s options are not compatible", onlyHashOptionName, toFilesOptionName) } hashFunCode, ok := mh.Names[strings.ToLower(hashFunStr)] if !ok { return fmt.Errorf("unrecognized hash function: %q", strings.ToLower(hashFunStr)) } enc, err := cmdenv.GetCidEncoder(req) if err != nil { return err } toadd := req.Files if wrap { toadd = files.NewSliceDirectory([]files.DirEntry{ files.FileEntry("", req.Files), }) } opts := []options.UnixfsAddOption{ options.Unixfs.Hash(hashFunCode), options.Unixfs.Inline(inline), options.Unixfs.InlineLimit(inlineLimit), options.Unixfs.Chunker(chunker), options.Unixfs.Pin(dopin), options.Unixfs.HashOnly(onlyHash), options.Unixfs.FsCache(fscache), options.Unixfs.Nocopy(nocopy), options.Unixfs.Progress(progress), options.Unixfs.Silent(silent), options.Unixfs.PreserveMode(preserveMode), options.Unixfs.PreserveMtime(preserveMtime), } if mode != 0 { opts = append(opts, 
options.Unixfs.Mode(os.FileMode(mode))) } if mtime != 0 { opts = append(opts, options.Unixfs.Mtime(mtime, uint32(mtimeNsecs))) } else if mtimeNsecs != 0 { return fmt.Errorf("option %q requires %q to be provided as well", mtimeNsecsOptionName, mtimeOptionName) } if cidVerSet { opts = append(opts, options.Unixfs.CidVersion(cidVer)) } if rbset { opts = append(opts, options.Unixfs.RawLeaves(rawblks)) } if trickle { opts = append(opts, options.Unixfs.Layout(options.TrickleLayout)) } opts = append(opts, nil) ipfsNode, err := cmdenv.GetNode(env) if err != nil { return err } var added int var fileAddedToMFS bool addit := toadd.Entries() for addit.Next() { _, dir := addit.Node().(files.Directory) errCh := make(chan error, 1) events := make(chan interface{}, adderOutChanSize) opts[len(opts)-1] = options.Unixfs.Events(events) go func() { var err error defer close(events) pathAdded, err := api.Unixfs().Add(req.Context, addit.Node(), opts...) if err != nil { errCh <- err return } if toFilesSet { if toFilesStr == "" { toFilesStr = "/" } toFilesDst, err := checkPath(toFilesStr) if err != nil { errCh <- fmt.Errorf("%s: %w", toFilesOptionName, err) return } dstAsDir := toFilesDst[len(toFilesDst)-1] == '/' if dstAsDir { mfsNode, err := mfs.Lookup(ipfsNode.FilesRoot, toFilesDst) if err != nil { errCh <- fmt.Errorf("%s: MFS destination directory %q does not exist: %w", toFilesOptionName, toFilesDst, err) return } if mfsNode.Type() != mfs.TDir { errCh <- fmt.Errorf("%s: MFS destination %q is not a directory", toFilesOptionName, toFilesDst) return } toFilesDst += gopath.Base(addit.Name()) } if fileAddedToMFS && !dstAsDir { errCh <- fmt.Errorf("%s: MFS destination is a file: only one entry can be copied to %q", toFilesOptionName, toFilesDst) return } _, err = mfs.Lookup(ipfsNode.FilesRoot, gopath.Dir(toFilesDst)) if err != nil { errCh <- fmt.Errorf("%s: MFS destination parent %q %q does not exist: %w", toFilesOptionName, toFilesDst, gopath.Dir(toFilesDst), err) return } var nodeAdded ipld.Node nodeAdded, err = api.Dag().Get(req.Context, pathAdded.RootCid()) if err != nil { errCh <- err return } err = mfs.PutNode(ipfsNode.FilesRoot, toFilesDst, nodeAdded) if err != nil { errCh <- fmt.Errorf("%s: cannot put node in path %q: %w", toFilesOptionName, toFilesDst, err) return } fileAddedToMFS = true } errCh <- err }() for event := range events { output, ok := event.(*coreiface.AddEvent) if !ok { return errors.New("unknown event type") } h := "" if (output.Path != path.ImmutablePath{}) { h = enc.Encode(output.Path.RootCid()) } if !dir && addit.Name() != "" { output.Name = addit.Name() } else { output.Name = gopath.Join(addit.Name(), output.Name) } output.Mode = addit.Node().Mode() if ts := addit.Node().ModTime(); !ts.IsZero() { output.Mtime = addit.Node().ModTime().Unix() output.MtimeNsecs = addit.Node().ModTime().Nanosecond() } addEvent := AddEvent{ Name: output.Name, Hash: h, Bytes: output.Bytes, Size: output.Size, Mtime: output.Mtime, MtimeNsecs: output.MtimeNsecs, } if output.Mode != 0 { addEvent.Mode = "0" + strconv.FormatUint(uint64(output.Mode), 8) } if output.Mtime > 0 { addEvent.Mtime = output.Mtime if output.MtimeNsecs > 0 { addEvent.MtimeNsecs = output.MtimeNsecs } } if err := res.Emit(&addEvent); err != nil { return err } } if err := <-errCh; err != nil { return err } added++ } if addit.Err() != nil { return addit.Err() } if added == 0 { return fmt.Errorf("expected a file argument") } return nil }, PostRun: cmds.PostRunMap{ cmds.CLI: func(res cmds.Response, re cmds.ResponseEmitter) error { sizeChan := 
make(chan int64, 1) outChan := make(chan interface{}) req := res.Request() go func() { size, err := req.Files.Size() if err != nil { log.Warnf("error getting files size: %s", err) return } sizeChan <- size }() progressBar := func(wait chan struct{}) { defer close(wait) quiet, _ := req.Options[quietOptionName].(bool) quieter, _ := req.Options[quieterOptionName].(bool) quiet = quiet || quieter progress, _ := req.Options[progressOptionName].(bool) var bar *pb.ProgressBar if progress { bar = pb.New64(0).SetUnits(pb.U_BYTES) bar.ManualUpdate = true bar.ShowTimeLeft = false bar.ShowPercent = false bar.Output = os.Stderr bar.Start() } lastFile := "" lastHash := "" var totalProgress, prevFiles, lastBytes int64 LOOP: for { select { case out, ok := <-outChan: if !ok { if quieter { fmt.Fprintln(os.Stdout, lastHash) } break LOOP } output := out.(*AddEvent) if len(output.Hash) > 0 { lastHash = output.Hash if quieter { continue } if progress { fmt.Fprintf(os.Stderr, "\033[2K\r") } if quiet { fmt.Fprintf(os.Stdout, "%s\n", output.Hash) } else { fmt.Fprintf(os.Stdout, "added %s %s\n", output.Hash, cmdenv.EscNonPrint(output.Name)) } } else { if !progress { continue } if len(lastFile) == 0 { lastFile = output.Name } if output.Name != lastFile || output.Bytes < lastBytes { prevFiles += lastBytes lastFile = output.Name } lastBytes = output.Bytes delta := prevFiles + lastBytes - totalProgress totalProgress = bar.Add64(delta) } if progress { bar.Update() } case size := <-sizeChan: if progress { bar.Total = size bar.ShowPercent = true bar.ShowBar = true bar.ShowTimeLeft = true } case <-req.Context.Done(): return } } if progress && bar.Total == 0 && bar.Get() != 0 { bar.Total = bar.Get() bar.ShowPercent = true bar.ShowBar = true bar.ShowTimeLeft = true bar.Update() } } if e := res.Error(); e != nil { close(outChan) return e } wait := make(chan struct{}) go progressBar(wait) defer func() { <-wait }() defer close(outChan) for { v, err := res.Next() if err != nil { if err == io.EOF { return nil } return err } select { case outChan <- v: case <-req.Context.Done(): return req.Context.Err() } } }, }, Type: AddEvent{}, }
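The Run function above ultimately delegates to the CoreAPI's Unixfs().Add with the options described in the help text. A minimal sketch of the equivalent programmatic call, assuming a coreiface.CoreAPI value (e.g. obtained from kubo's coreapi package), the boxo files and path packages, and the coreiface options package; the helper and its option choices are illustrative, not the command's exact behavior.

// Illustrative helper: add a byte slice with defaults similar to 'ipfs add'.
func addBytes(ctx context.Context, api coreiface.CoreAPI, data []byte) (path.ImmutablePath, error) {
    return api.Unixfs().Add(ctx, files.NewBytesFile(data),
        options.Unixfs.Chunker("size-262144"), // the default fixed-size chunker described above
        options.Unixfs.Pin(true),              // same as the implicit --pin=true
    )
}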
var BitswapCmd = &cmds.Command{
    Helptext: cmds.HelpText{
        Tagline:          "Interact with the bitswap agent.",
        ShortDescription: ``,
    },
    Subcommands: map[string]*cmds.Command{
        "stat":      bitswapStatCmd,
        "wantlist":  showWantlistCmd,
        "ledger":    ledgerCmd,
        "reprovide": reprovideCmd,
    },
}
var BlockCmd = &cmds.Command{
    Helptext: cmds.HelpText{
        Tagline: "Interact with raw IPFS blocks.",
        ShortDescription: `
'ipfs block' is a plumbing command used to manipulate raw IPFS blocks.
Reads from stdin or writes to stdout. A block is identified by a Multihash
passed with a valid CID.
`,
    },
    Subcommands: map[string]*cmds.Command{
        "stat": blockStatCmd,
        "get":  blockGetCmd,
        "put":  blockPutCmd,
        "rm":   blockRmCmd,
    },
}
var BootstrapCmd = &cmds.Command{
    Helptext: cmds.HelpText{
        Tagline: "Show or edit the list of bootstrap peers.",
        ShortDescription: `
Running 'ipfs bootstrap' with no arguments will run 'ipfs bootstrap list'.
` + bootstrapSecurityWarning,
    },
    Run:      bootstrapListCmd.Run,
    Encoders: bootstrapListCmd.Encoders,
    Type:     bootstrapListCmd.Type,
    Subcommands: map[string]*cmds.Command{
        "list": bootstrapListCmd,
        "add":  bootstrapAddCmd,
        "rm":   bootstrapRemoveCmd,
    },
}
var CatCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Show IPFS object data.", ShortDescription: "Displays the data contained by an IPFS or IPNS object(s) at the given path.", }, Arguments: []cmds.Argument{ cmds.StringArg("ipfs-path", true, true, "The path to the IPFS object(s) to be outputted.").EnableStdin(), }, Options: []cmds.Option{ cmds.Int64Option(offsetOptionName, "o", "Byte offset to begin reading from."), cmds.Int64Option(lengthOptionName, "l", "Maximum number of bytes to read."), cmds.BoolOption(progressOptionName, "p", "Stream progress data.").WithDefault(true), }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { api, err := cmdenv.GetApi(env, req) if err != nil { return err } offset, _ := req.Options[offsetOptionName].(int64) if offset < 0 { return fmt.Errorf("cannot specify negative offset") } max, found := req.Options[lengthOptionName].(int64) if max < 0 { return fmt.Errorf("cannot specify negative length") } if !found { max = -1 } err = req.ParseBodyArgs() if err != nil { return err } readers, length, err := cat(req.Context, api, req.Arguments, int64(offset), int64(max)) if err != nil { return err } res.SetLength(length) reader := io.MultiReader(readers...) return res.Emit(reader) }, PostRun: cmds.PostRunMap{ cmds.CLI: func(res cmds.Response, re cmds.ResponseEmitter) error { if res.Length() > 0 && res.Length() < progressBarMinSize { return cmds.Copy(re, res) } for { v, err := res.Next() if err != nil { if err == io.EOF { return nil } return err } switch val := v.(type) { case io.Reader: reader := val req := res.Request() progress, _ := req.Options[progressOptionName].(bool) if progress { var bar *pb.ProgressBar bar, reader = progressBarForReader(os.Stderr, val, int64(res.Length())) bar.Start() defer bar.Finish() } err = re.Emit(reader) if err != nil { return err } default: log.Warnf("cat postrun: received unexpected type %T", val) } } }, }, }
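The programmatic equivalent of 'ipfs cat' is fetching the UnixFS node through the CoreAPI and reading it. A hedged sketch, assuming a coreiface.CoreAPI, a boxo path.Path, and the boxo files package; the helper is illustrative and does not implement the offset/length options above.

// Illustrative helper: stream the contents of a UnixFS file to a writer.
func catToWriter(ctx context.Context, api coreiface.CoreAPI, p path.Path, w io.Writer) error {
    node, err := api.Unixfs().Get(ctx, p)
    if err != nil {
        return err
    }
    defer node.Close()
    f, ok := node.(files.File)
    if !ok {
        return fmt.Errorf("%s is not a file", p)
    }
    _, err = io.Copy(w, f)
    return err
}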
var CidCmd = &cmds.Command{
    Helptext: cmds.HelpText{
        Tagline: "Convert and discover properties of CIDs",
    },
    Subcommands: map[string]*cmds.Command{
        "format": cidFmtCmd,
        "base32": base32Cmd,
        "bases":  basesCmd,
        "codecs": codecsCmd,
        "hashes": hashesCmd,
    },
    Extra: CreateCmdExtras(SetDoesNotUseRepo(true)),
}
var CommandsDaemonCmd = CommandsCmd(Root)
var ConfigCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Get and set IPFS config values.", ShortDescription: ` 'ipfs config' controls configuration variables. It works like 'git config'. The configuration values are stored in a config file inside your IPFS_PATH.`, LongDescription: ` 'ipfs config' controls configuration variables. It works much like 'git config'. The configuration values are stored in a config file inside your IPFS repository (IPFS_PATH). Examples: Get the value of the 'Datastore.Path' key: $ ipfs config Datastore.Path Set the value of the 'Datastore.Path' key: $ ipfs config Datastore.Path ~/.ipfs/datastore `, }, Subcommands: map[string]*cmds.Command{ "show": configShowCmd, "edit": configEditCmd, "replace": configReplaceCmd, "profile": configProfileCmd, }, Arguments: []cmds.Argument{ cmds.StringArg("key", true, false, "The key of the config entry (e.g. \"Addresses.API\")."), cmds.StringArg("value", false, false, "The value to set the config entry to."), }, Options: []cmds.Option{ cmds.BoolOption(configBoolOptionName, "Set a boolean value."), cmds.BoolOption(configJSONOptionName, "Parse stringified JSON."), }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { args := req.Arguments key := args[0] var output *ConfigField switch strings.ToLower(key) { case "identity", "identity.privkey": return errors.New("cannot show or change private key through API") default: } if blocked := matchesGlobPrefix(key, config.PinningConcealSelector); blocked { return errors.New("cannot show or change pinning services credentials") } cfgRoot, err := cmdenv.GetConfigRoot(env) if err != nil { return err } r, err := fsrepo.Open(cfgRoot) if err != nil { return err } defer r.Close() if len(args) == 2 { value := args[1] if parseJSON, _ := req.Options[configJSONOptionName].(bool); parseJSON { var jsonVal interface{} if err := json.Unmarshal([]byte(value), &jsonVal); err != nil { err = fmt.Errorf("failed to unmarshal json. %s", err) return err } output, err = setConfig(r, key, jsonVal) } else if isbool, _ := req.Options[configBoolOptionName].(bool); isbool { output, err = setConfig(r, key, value == "true") } else { output, err = setConfig(r, key, value) } } else { output, err = getConfig(r, key) } if err != nil { return err } return cmds.EmitOnce(res, output) }, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *ConfigField) error { if len(req.Arguments) == 2 { return nil } buf, err := config.HumanOutput(out.Value) if err != nil { return err } buf = append(buf, byte('\n')) _, err = w.Write(buf) return err }), }, Type: ConfigField{}, }
var DefaultBufSize = 1048576
DefaultBufSize is the buffer size for gets. For now it is 1 MiB, which is roughly 4 blocks. TODO: does this need to be configurable?
var DhtCmd = &cmds.Command{
    Status: cmds.Deprecated,
    Helptext: cmds.HelpText{
        Tagline:          "Issue commands directly through the DHT.",
        ShortDescription: ``,
    },
    Subcommands: map[string]*cmds.Command{
        "query":     queryDhtCmd,
        "findprovs": RemovedDHTCmd,
        "findpeer":  RemovedDHTCmd,
        "get":       RemovedDHTCmd,
        "put":       RemovedDHTCmd,
        "provide":   RemovedDHTCmd,
    },
}
var DiagCmd = &cmds.Command{
    Helptext: cmds.HelpText{
        Tagline: "Generate diagnostic reports.",
    },
    Subcommands: map[string]*cmds.Command{
        "sys":     sysDiagCmd,
        "cmds":    ActiveReqsCmd,
        "profile": sysProfileCmd,
    },
}
var ErrDepthLimitExceeded = fmt.Errorf("depth limit exceeded")
ErrDepthLimitExceeded indicates that the max depth has been exceeded.
var ErrInvalidCompressionLevel = errors.New("compression level must be between 1 and 9")
var ErrNotDHT = errors.New("routing service is not a DHT")
var ErrPingSelf = errors.New("error: can't ping self")
ErrPingSelf is returned when the user attempts to ping themselves.
var FileStoreCmd = &cmds.Command{
    Helptext: cmds.HelpText{
        Tagline: "Interact with filestore objects.",
    },
    Subcommands: map[string]*cmds.Command{
        "ls":     lsFileStore,
        "verify": verifyFileStore,
        "dups":   dupsFileStore,
    },
}
var FilesCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Interact with unixfs files.", ShortDescription: ` Files is an API for manipulating IPFS objects as if they were a Unix filesystem. The files facility interacts with MFS (Mutable File System). MFS acts as a single, dynamic filesystem mount. MFS has a root CID that is transparently updated when a change happens (and can be checked with "ipfs files stat /"). All files and folders within MFS are respected and will not be deleted during garbage collections. However, a DAG may be referenced in MFS without being fully available locally (MFS content is lazy loaded when accessed). MFS is independent from the list of pinned items ("ipfs pin ls"). Calls to "ipfs pin add" and "ipfs pin rm" will add and remove pins independently of MFS. If MFS content that was additionally pinned is removed by calling "ipfs files rm", it will still remain pinned. Content added with "ipfs add" (which by default also becomes pinned), is not added to MFS. Any content can be lazily referenced from MFS with the command "ipfs files cp /ipfs/<cid> /some/path/" (see ipfs files cp --help). NOTE: Most of the subcommands of 'ipfs files' accept the '--flush' flag. It defaults to true. Use caution when setting this flag to false. It will improve performance for large numbers of file operations, but it does so at the cost of consistency guarantees. If the daemon is unexpectedly killed before running 'ipfs files flush' on the files in question, then data may be lost. This also applies to run 'ipfs repo gc' concurrently with '--flush=false' operations. `, }, Options: []cmds.Option{ cmds.BoolOption(filesFlushOptionName, "f", "Flush target and ancestors after write.").WithDefault(true), }, Subcommands: map[string]*cmds.Command{ "read": filesReadCmd, "write": filesWriteCmd, "mv": filesMvCmd, "cp": filesCpCmd, "ls": filesLsCmd, "mkdir": filesMkdirCmd, "stat": filesStatCmd, "rm": filesRmCmd, "flush": filesFlushCmd, "chcid": filesChcidCmd, "chmod": filesChmodCmd, "touch": filesTouchCmd, }, }
FilesCmd is the 'ipfs files' command
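A hedged sketch of the underlying MFS calls this command family wraps (the same mfs.Lookup used by AddCmd's --to-files handling). The helper is illustrative and assumes a *core.IpfsNode such as the one returned by cmdenv.GetNode, plus the boxo mfs package.

// Illustrative helper: check that an MFS path exists and is a directory.
func requireMFSDir(nd *core.IpfsNode, dir string) error {
    node, err := mfs.Lookup(nd.FilesRoot, dir)
    if err != nil {
        return err
    }
    if node.Type() != mfs.TDir {
        return fmt.Errorf("%s is not a directory", dir)
    }
    return nil
}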
var GetCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Download IPFS objects.", ShortDescription: ` Stores to disk the data contained an IPFS or IPNS object(s) at the given path. By default, the output will be stored at './<ipfs-path>', but an alternate path can be specified with '--output=<path>' or '-o=<path>'. To output a TAR archive instead of unpacked files, use '--archive' or '-a'. To compress the output with GZIP compression, use '--compress' or '-C'. You may also specify the level of compression by specifying '-l=<1-9>'. `, }, Arguments: []cmds.Argument{ cmds.StringArg("ipfs-path", true, false, "The path to the IPFS object(s) to be outputted.").EnableStdin(), }, Options: []cmds.Option{ cmds.StringOption(outputOptionName, "o", "The path where the output should be stored."), cmds.BoolOption(archiveOptionName, "a", "Output a TAR archive."), cmds.BoolOption(compressOptionName, "C", "Compress the output with GZIP compression."), cmds.IntOption(compressionLevelOptionName, "l", "The level of compression (1-9)."), cmds.BoolOption(progressOptionName, "p", "Stream progress data.").WithDefault(true), }, PreRun: func(req *cmds.Request, env cmds.Environment) error { _, err := getCompressOptions(req) return err }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { ctx := req.Context cmplvl, err := getCompressOptions(req) if err != nil { return err } api, err := cmdenv.GetApi(env, req) if err != nil { return err } p, err := cmdutils.PathOrCidPath(req.Arguments[0]) if err != nil { return err } file, err := api.Unixfs().Get(ctx, p) if err != nil { return err } size, err := file.Size() if err != nil { return err } res.SetLength(uint64(size)) archive, _ := req.Options[archiveOptionName].(bool) reader, err := fileArchive(file, p.String(), archive, cmplvl) if err != nil { return err } go func() { <-ctx.Done() reader.Close() }() return res.Emit(reader) }, PostRun: cmds.PostRunMap{ cmds.CLI: func(res cmds.Response, re cmds.ResponseEmitter) error { req := res.Request() v, err := res.Next() if err != nil { return err } outReader, ok := v.(io.Reader) if !ok { return e.New(e.TypeErr(outReader, v)) } outPath := getOutPath(req) cmplvl, err := getCompressOptions(req) if err != nil { return err } archive, _ := req.Options[archiveOptionName].(bool) progress, _ := req.Options[progressOptionName].(bool) gw := getWriter{ Out: os.Stdout, Err: os.Stderr, Archive: archive, Compression: cmplvl, Size: int64(res.Length()), Progress: progress, } return gw.Write(outReader, outPath) }, }, }
var HumanJSONEncoder = cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *map[string]interface{}) error {
    buf, err := config.HumanOutput(out)
    if err != nil {
        return err
    }
    buf = append(buf, byte('\n'))
    _, err = w.Write(buf)
    return err
})
var IDCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Show IPFS node id info.", ShortDescription: ` Prints out information about the specified peer. If no peer is specified, prints out information for local peers. 'ipfs id' supports the format option for output with the following keys: <id> : The peers id. <aver>: Agent version. <pver>: Protocol version. <pubkey>: Public key. <addrs>: Addresses (newline delimited). <protocols>: Libp2p Protocol registrations (newline delimited). EXAMPLE: ipfs id Qmece2RkXhsKe5CRooNisBTh4SK119KrXXGmoK6V3kb8aH -f="<addrs>\n" `, }, Arguments: []cmds.Argument{ cmds.StringArg("peerid", false, false, "Peer.ID of node to look up."), }, Options: []cmds.Option{ cmds.StringOption(formatOptionName, "f", "Optional output format."), cmds.StringOption(idFormatOptionName, "Encoding used for peer IDs: Can either be a multibase encoded CID or a base58btc encoded multihash. Takes {b58mh|base36|k|base32|b...}.").WithDefault("b58mh"), }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { keyEnc, err := ke.KeyEncoderFromString(req.Options[idFormatOptionName].(string)) if err != nil { return err } n, err := cmdenv.GetNode(env) if err != nil { return err } var id peer.ID if len(req.Arguments) > 0 { var err error id, err = peer.Decode(req.Arguments[0]) if err != nil { return fmt.Errorf("invalid peer id") } } else { id = n.Identity } if id == n.Identity { output, err := printSelf(keyEnc, n) if err != nil { return err } return cmds.EmitOnce(res, output) } offline, _ := req.Options[OfflineOption].(bool) if !offline && !n.IsOnline { return errors.New(offlineIDErrorMessage) } if !offline { err = n.PeerHost.Connect(req.Context, peer.AddrInfo{ID: id}) switch err { case nil: case kb.ErrLookupFailure: return errors.New(offlineIDErrorMessage) default: return err } } output, err := printPeer(keyEnc, n.Peerstore, id) if err != nil { return err } return cmds.EmitOnce(res, output) }, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *IdOutput) error { format, found := req.Options[formatOptionName].(string) if found { output := format output = strings.Replace(output, "<id>", out.ID, -1) output = strings.Replace(output, "<aver>", out.AgentVersion, -1) output = strings.Replace(output, "<pubkey>", out.PublicKey, -1) output = strings.Replace(output, "<addrs>", strings.Join(out.Addresses, "\n"), -1) output = strings.Replace(output, "<protocols>", strings.Join(protocol.ConvertToStrings(out.Protocols), "\n"), -1) output = strings.Replace(output, "\\n", "\n", -1) output = strings.Replace(output, "\\t", "\t", -1) fmt.Fprint(w, output) } else { marshaled, err := json.MarshalIndent(out, "", "\t") if err != nil { return err } marshaled = append(marshaled, byte('\n')) fmt.Fprintln(w, string(marshaled)) } return nil }), }, Type: IdOutput{}, }
var KeyCmd = &cmds.Command{
    Helptext: cmds.HelpText{
        Tagline: "Create and list IPNS name keypairs",
        ShortDescription: `
'ipfs key gen' generates a new keypair for usage with IPNS and
'ipfs name publish'.

  > ipfs key gen --type=rsa --size=2048 mykey
  > ipfs name publish --key=mykey QmSomeHash

'ipfs key list' lists the available keys.

  > ipfs key list
  self
  mykey
`,
    },
    Subcommands: map[string]*cmds.Command{
        "gen":    keyGenCmd,
        "export": keyExportCmd,
        "import": keyImportCmd,
        "list":   keyListCmd,
        "rename": keyRenameCmd,
        "rm":     keyRmCmd,
        "rotate": keyRotateCmd,
        "sign":   keySignCmd,
        "verify": keyVerifyCmd,
    },
}
var LogCmd = &cmds.Command{
    Helptext: cmds.HelpText{
        Tagline: "Interact with the daemon log output.",
        ShortDescription: `
'ipfs log' contains utility commands to affect or read the logging
output of a running daemon.

There are also two environmental variables that direct the logging
system (not just for the daemon logs, but all commands):
    IPFS_LOGGING - sets the level of verbosity of the logging.
        One of: debug, info, warn, error, dpanic, panic, fatal
    IPFS_LOGGING_FMT - sets formatting of the log output.
        One of: color, nocolor
`,
    },
    Subcommands: map[string]*cmds.Command{
        "level": logLevelCmd,
        "ls":    logLsCmd,
        "tail":  logTailCmd,
    },
}
var LsCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "List directory contents for Unix filesystem objects.", ShortDescription: ` Displays the contents of an IPFS or IPNS object(s) at the given path, with the following format: <link base58 hash> <link size in bytes> <link name> The JSON output contains type information. `, }, Arguments: []cmds.Argument{ cmds.StringArg("ipfs-path", true, true, "The path to the IPFS object(s) to list links from.").EnableStdin(), }, Options: []cmds.Option{ cmds.BoolOption(lsHeadersOptionNameTime, "v", "Print table headers (Hash, Size, Name)."), cmds.BoolOption(lsResolveTypeOptionName, "Resolve linked objects to find out their types.").WithDefault(true), cmds.BoolOption(lsSizeOptionName, "Resolve linked objects to find out their file size.").WithDefault(true), cmds.BoolOption(lsStreamOptionName, "s", "Enable experimental streaming of directory entries as they are traversed."), }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { api, err := cmdenv.GetApi(env, req) if err != nil { return err } resolveType, _ := req.Options[lsResolveTypeOptionName].(bool) resolveSize, _ := req.Options[lsSizeOptionName].(bool) stream, _ := req.Options[lsStreamOptionName].(bool) err = req.ParseBodyArgs() if err != nil { return err } paths := req.Arguments enc, err := cmdenv.GetCidEncoder(req) if err != nil { return err } var processLink func(path string, link LsLink) error var dirDone func(i int) processDir := func() (func(path string, link LsLink) error, func(i int)) { return func(path string, link LsLink) error { output := []LsObject{{ Hash: path, Links: []LsLink{link}, }} return res.Emit(&LsOutput{output}) }, func(i int) {} } done := func() error { return nil } if !stream { output := make([]LsObject, len(req.Arguments)) processDir = func() (func(path string, link LsLink) error, func(i int)) { outputLinks := make([]LsLink, 0) return func(path string, link LsLink) error { outputLinks = append(outputLinks, link) return nil }, func(i int) { sort.Slice(outputLinks, func(i, j int) bool { return outputLinks[i].Name < outputLinks[j].Name }) output[i] = LsObject{ Hash: paths[i], Links: outputLinks, } } } done = func() error { return cmds.EmitOnce(res, &LsOutput{output}) } } for i, fpath := range paths { pth, err := cmdutils.PathOrCidPath(fpath) if err != nil { return err } results, err := api.Unixfs().Ls(req.Context, pth, options.Unixfs.ResolveChildren(resolveSize || resolveType)) if err != nil { return err } processLink, dirDone = processDir() for link := range results { if link.Err != nil { return link.Err } var ftype unixfs_pb.Data_DataType switch link.Type { case iface.TFile: ftype = unixfs.TFile case iface.TDirectory: ftype = unixfs.TDirectory case iface.TSymlink: ftype = unixfs.TSymlink } lsLink := LsLink{ Name: link.Name, Hash: enc.Encode(link.Cid), Size: link.Size, Type: ftype, Target: link.Target, Mode: link.Mode, ModTime: link.ModTime, } if err := processLink(paths[i], lsLink); err != nil { return err } } dirDone(i) } return done() }, PostRun: cmds.PostRunMap{ cmds.CLI: func(res cmds.Response, re cmds.ResponseEmitter) error { req := res.Request() lastObjectHash := "" for { v, err := res.Next() if err != nil { if err == io.EOF { return nil } return err } out := v.(*LsOutput) lastObjectHash = tabularOutput(req, os.Stdout, out, lastObjectHash, false) } }, }, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *LsOutput) error { ignoreBreaks, _ := req.Options[lsStreamOptionName].(bool) 
tabularOutput(req, w, out, "", ignoreBreaks) return nil }), }, Type: LsOutput{}, }
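The same listing is available programmatically through the CoreAPI call used above. A minimal sketch, assuming a coreiface.CoreAPI, a boxo path.Path, and the coreiface options package; the helper is illustrative.

// Illustrative helper: print CID, size and name for each entry of a UnixFS directory.
func listDir(ctx context.Context, api coreiface.CoreAPI, p path.Path) error {
    entries, err := api.Unixfs().Ls(ctx, p, options.Unixfs.ResolveChildren(true))
    if err != nil {
        return err
    }
    for link := range entries {
        if link.Err != nil {
            return link.Err
        }
        fmt.Printf("%s\t%d\t%s\n", link.Cid, link.Size, link.Name)
    }
    return nil
}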
var MbaseCmd = &cmds.Command{
    Helptext: cmds.HelpText{
        Tagline: "Encode and decode files or stdin with multibase format",
    },
    Subcommands: map[string]*cmds.Command{
        "encode":    mbaseEncodeCmd,
        "decode":    mbaseDecodeCmd,
        "transcode": mbaseTranscodeCmd,
        "list":      basesCmd,
    },
    Extra: CreateCmdExtras(SetDoesNotUseRepo(true)),
}
var MountCmd = &cmds.Command{ Status: cmds.Experimental, Helptext: cmds.HelpText{ Tagline: "Mounts IPFS to the filesystem (read-only).", ShortDescription: ` Mount IPFS at a read-only mountpoint on the OS (default: /ipfs and /ipns). All IPFS objects will be accessible under that directory. Note that the root will not be listable, as it is virtual. Access known paths directly. You may have to create /ipfs and /ipns before using 'ipfs mount': > sudo mkdir /ipfs /ipns > sudo chown $(whoami) /ipfs /ipns > ipfs daemon & > ipfs mount `, LongDescription: ` Mount IPFS at a read-only mountpoint on the OS. The default, /ipfs and /ipns, are set in the configuration file, but can be overridden by the options. All IPFS objects will be accessible under this directory. Note that the root will not be listable, as it is virtual. Access known paths directly. You may have to create /ipfs and /ipns before using 'ipfs mount': > sudo mkdir /ipfs /ipns > sudo chown $(whoami) /ipfs /ipns > ipfs daemon & > ipfs mount Example: # setup > mkdir foo > echo "baz" > foo/bar > ipfs add -r foo added QmWLdkp93sNxGRjnFHPaYg8tCQ35NBY3XPn6KiETd3Z4WR foo/bar added QmSh5e7S6fdcu75LAbXNZAFY2nGyZUJXyLCJDvn2zRkWyC foo > ipfs ls QmSh5e7S6fdcu75LAbXNZAFY2nGyZUJXyLCJDvn2zRkWyC QmWLdkp93sNxGRjnFHPaYg8tCQ35NBY3XPn6KiETd3Z4WR 12 bar > ipfs cat QmWLdkp93sNxGRjnFHPaYg8tCQ35NBY3XPn6KiETd3Z4WR baz # mount > ipfs daemon & > ipfs mount IPFS mounted at: /ipfs IPNS mounted at: /ipns > cd /ipfs/QmSh5e7S6fdcu75LAbXNZAFY2nGyZUJXyLCJDvn2zRkWyC > ls bar > cat bar baz > cat /ipfs/QmSh5e7S6fdcu75LAbXNZAFY2nGyZUJXyLCJDvn2zRkWyC/bar baz > cat /ipfs/QmWLdkp93sNxGRjnFHPaYg8tCQ35NBY3XPn6KiETd3Z4WR baz `, }, Options: []cmds.Option{ cmds.StringOption(mountIPFSPathOptionName, "f", "The path where IPFS should be mounted."), cmds.StringOption(mountIPNSPathOptionName, "n", "The path where IPNS should be mounted."), }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { cfg, err := env.(*oldcmds.Context).GetConfig() if err != nil { return err } nd, err := cmdenv.GetNode(env) if err != nil { return err } if !nd.IsOnline { return ErrNotOnline } fsdir, found := req.Options[mountIPFSPathOptionName].(string) if !found { fsdir = cfg.Mounts.IPFS } nsdir, found := req.Options[mountIPNSPathOptionName].(string) if !found { nsdir = cfg.Mounts.IPNS } err = nodeMount.Mount(nd, fsdir, nsdir) if err != nil { return err } var output config.Mounts output.IPFS = fsdir output.IPNS = nsdir return cmds.EmitOnce(res, &output) }, Type: config.Mounts{}, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, mounts *config.Mounts) error { fmt.Fprintf(w, "IPFS mounted at: %s\n", cmdenv.EscNonPrint(mounts.IPFS)) fmt.Fprintf(w, "IPNS mounted at: %s\n", cmdenv.EscNonPrint(mounts.IPNS)) return nil }), }, }
var P2PCmd = &cmds.Command{
    Status: cmds.Experimental,
    Helptext: cmds.HelpText{
        Tagline: "Libp2p stream mounting.",
        ShortDescription: `
Create and use tunnels to remote peers over libp2p

Note: this command is experimental and subject to change as usecases and APIs
are refined`,
    },
    Subcommands: map[string]*cmds.Command{
        "stream":  p2pStreamCmd,
        "forward": p2pForwardCmd,
        "listen":  p2pListenCmd,
        "close":   p2pCloseCmd,
        "ls":      p2pLsCmd,
    },
}
P2PCmd is the 'ipfs p2p' command
var PingCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Send echo request packets to IPFS hosts.", ShortDescription: ` 'ipfs ping' is a tool to test sending data to other nodes. It finds nodes via the routing system, sends pings, waits for pongs, and prints out round- trip latency information. `, }, Arguments: []cmds.Argument{ cmds.StringArg("peer ID", true, true, "ID of peer to be pinged.").EnableStdin(), }, Options: []cmds.Option{ cmds.IntOption(pingCountOptionName, "n", "Number of ping messages to send.").WithDefault(10), }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { n, err := cmdenv.GetNode(env) if err != nil { return err } if !n.IsOnline { return ErrNotOnline } addr, pid, err := ParsePeerParam(req.Arguments[0]) if err != nil { return fmt.Errorf("failed to parse peer address '%s': %s", req.Arguments[0], err) } if pid == n.Identity { return ErrPingSelf } if addr != nil { n.Peerstore.AddAddr(pid, addr, pstore.TempAddrTTL) } numPings, _ := req.Options[pingCountOptionName].(int) if numPings <= 0 { return fmt.Errorf("ping count must be greater than 0, was %d", numPings) } if len(n.Peerstore.Addrs(pid)) == 0 { if err := res.Emit(&PingResult{ Text: fmt.Sprintf("Looking up peer %s", pid), Success: true, }); err != nil { return err } ctx, cancel := context.WithTimeout(req.Context, kPingTimeout) p, err := n.Routing.FindPeer(ctx, pid) cancel() if err != nil { return fmt.Errorf("peer lookup failed: %s", err) } n.Peerstore.AddAddrs(p.ID, p.Addrs, pstore.TempAddrTTL) } if err := res.Emit(&PingResult{ Text: fmt.Sprintf("PING %s.", pid), Success: true, }); err != nil { return err } ctx, cancel := context.WithTimeout(req.Context, kPingTimeout*time.Duration(numPings)) defer cancel() pings := ping.Ping(ctx, n.PeerHost, pid) var ( count int total time.Duration ) ticker := time.NewTicker(time.Second) defer ticker.Stop() for i := 0; i < numPings; i++ { r, ok := <-pings if !ok { break } if r.Error != nil { err = res.Emit(&PingResult{ Success: false, Text: fmt.Sprintf("Ping error: %s", r.Error), }) } else { count++ total += r.RTT err = res.Emit(&PingResult{ Success: true, Time: r.RTT, }) } if err != nil { return err } select { case <-ticker.C: case <-ctx.Done(): return ctx.Err() } } if count == 0 { return fmt.Errorf("ping failed") } averagems := total.Seconds() * 1000 / float64(count) return res.Emit(&PingResult{ Success: true, Text: fmt.Sprintf("Average latency: %.2fms", averagems), }) }, Type: PingResult{}, PostRun: cmds.PostRunMap{ cmds.CLI: func(res cmds.Response, re cmds.ResponseEmitter) error { var ( total time.Duration count int ) for { event, err := res.Next() switch err { case nil: case io.EOF: return nil case context.Canceled, context.DeadlineExceeded: if count == 0 { return err } averagems := total.Seconds() * 1000 / float64(count) return re.Emit(&PingResult{ Success: true, Text: fmt.Sprintf("Average latency: %.2fms", averagems), }) default: return err } pr := event.(*PingResult) if pr.Success && pr.Text == "" { total += pr.Time count++ } err = re.Emit(event) if err != nil { return err } } }, }, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *PingResult) error { if len(out.Text) > 0 { fmt.Fprintln(w, out.Text) } else if out.Success { fmt.Fprintf(w, "Pong received: time=%.2f ms\n", out.Time.Seconds()*1000) } else { fmt.Fprintf(w, "Pong failed\n") } return nil }), }, }
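PingCmd is a thin wrapper over the go-libp2p ping service used in the Run function above. A hedged sketch of that underlying call, assuming a connected go-libp2p host.Host and a peer.ID; the helper is illustrative and not part of the package.

// Illustrative helper: ping a peer until the timeout expires, printing each RTT.
func pingPeer(ctx context.Context, h host.Host, pid peer.ID) {
    ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
    defer cancel()
    for result := range ping.Ping(ctx, h, pid) {
        if result.Error != nil {
            fmt.Println("ping error:", result.Error)
            return
        }
        fmt.Println("RTT:", result.RTT)
    }
}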
var PubsubCmd = &cmds.Command{
    Status: cmds.Deprecated,
    Helptext: cmds.HelpText{
        Tagline: "An experimental publish-subscribe system on ipfs.",
        ShortDescription: `
ipfs pubsub allows you to publish messages to a given topic, and also to
subscribe to new messages on a given topic.

DEPRECATED FEATURE (see https://github.com/ipfs/kubo/issues/9717)

It is not intended in its current state to be used in a production
environment. To use, the daemon must be run with '--enable-pubsub-experiment'.
`,
    },
    Subcommands: map[string]*cmds.Command{
        "pub":   PubsubPubCmd,
        "sub":   PubsubSubCmd,
        "ls":    PubsubLsCmd,
        "peers": PubsubPeersCmd,
    },
}
var PubsubLsCmd = &cmds.Command{ Status: cmds.Deprecated, Helptext: cmds.HelpText{ Tagline: "List subscribed topics by name.", ShortDescription: ` ipfs pubsub ls lists out the names of topics you are currently subscribed to. DEPRECATED FEATURE (see https://github.com/ipfs/kubo/issues/9717) It is not intended in its current state to be used in a production environment. To use, the daemon must be run with '--enable-pubsub-experiment'. TOPIC ENCODING Topic names are a binary data. To ensure all bytes are transferred correctly RPC client and server will use multibase encoding behind the scenes. You can inspect the format by passing --enc=json. ipfs multibase commands can be used for encoding/decoding multibase strings in the userland. `, }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { api, err := cmdenv.GetApi(env, req) if err != nil { return err } l, err := api.PubSub().Ls(req.Context) if err != nil { return err } encoder, _ := mbase.EncoderByName("base64url") for n, topic := range l { l[n] = encoder.Encode([]byte(topic)) } return cmds.EmitOnce(res, stringList{l}) }, Type: stringList{}, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(multibaseDecodedStringListEncoder), }, }
var PubsubPeersCmd = &cmds.Command{ Status: cmds.Deprecated, Helptext: cmds.HelpText{ Tagline: "List peers we are currently pubsubbing with.", ShortDescription: ` ipfs pubsub peers with no arguments lists out the pubsub peers you are currently connected to. If given a topic, it will list connected peers who are subscribed to the named topic. DEPRECATED FEATURE (see https://github.com/ipfs/kubo/issues/9717) It is not intended in its current state to be used in a production environment. To use, the daemon must be run with '--enable-pubsub-experiment'. TOPIC AND DATA ENCODING Topic names are a binary data. To ensure all bytes are transferred correctly RPC client and server will use multibase encoding behind the scenes. You can inspect the format by passing --enc=json. ipfs multibase commands can be used for encoding/decoding multibase strings in the userland. `, }, Arguments: []cmds.Argument{ cmds.StringArg("topic", false, false, "Topic to list connected peers of."), }, PreRun: urlArgsEncoder, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { api, err := cmdenv.GetApi(env, req) if err != nil { return err } if err := urlArgsDecoder(req, env); err != nil { return err } var topic string if len(req.Arguments) == 1 { topic = req.Arguments[0] } peers, err := api.PubSub().Peers(req.Context, options.PubSub.Topic(topic)) if err != nil { return err } list := &stringList{make([]string, 0, len(peers))} for _, peer := range peers { list.Strings = append(list.Strings, peer.String()) } sort.Strings(list.Strings) return cmds.EmitOnce(res, list) }, Type: stringList{}, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(safeTextListEncoder), }, }
var PubsubPubCmd = &cmds.Command{ Status: cmds.Deprecated, Helptext: cmds.HelpText{ Tagline: "Publish data to a given pubsub topic.", ShortDescription: ` ipfs pubsub pub publishes a message to a specified topic. It reads binary data from stdin or a file. DEPRECATED FEATURE (see https://github.com/ipfs/kubo/issues/9717) It is not intended in its current state to be used in a production environment. To use, the daemon must be run with '--enable-pubsub-experiment'. HTTP RPC ENCODING The data to be published is sent in HTTP request body as multipart/form-data. Topic names are binary data too. To ensure all bytes are transferred correctly via URL params, the RPC client and server will use multibase encoding behind the scenes. `, }, Arguments: []cmds.Argument{ cmds.StringArg("topic", true, false, "Topic to publish to (multibase encoded when sent over HTTP RPC)."), cmds.FileArg("data", true, false, "The data to be published.").EnableStdin(), }, PreRun: urlArgsEncoder, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { api, err := cmdenv.GetApi(env, req) if err != nil { return err } if err := urlArgsDecoder(req, env); err != nil { return err } topic := req.Arguments[0] file, err := cmdenv.GetFileArg(req.Files.Entries()) if err != nil { return err } defer file.Close() data, err := io.ReadAll(file) if err != nil { return err } return api.PubSub().Publish(req.Context, topic, data) }, }
var PubsubSubCmd = &cmds.Command{ Status: cmds.Deprecated, Helptext: cmds.HelpText{ Tagline: "Subscribe to messages on a given topic.", ShortDescription: ` ipfs pubsub sub subscribes to messages on a given topic. DEPRECATED FEATURE (see https://github.com/ipfs/kubo/issues/9717) It is not intended in its current state to be used in a production environment. To use, the daemon must be run with '--enable-pubsub-experiment'. PEER ENCODING Peer IDs in From fields are encoded using the default text representation from go-libp2p. This ensures the same string values as in 'ipfs pubsub peers'. TOPIC AND DATA ENCODING Topics, Data and Seqno are binary data. To ensure all bytes are transferred correctly the RPC client and server will use multibase encoding behind the scenes. You can inspect the format by passing --enc=json. The ipfs multibase commands can be used for encoding/decoding multibase strings in the userland. `, }, Arguments: []cmds.Argument{ cmds.StringArg("topic", true, false, "Name of topic to subscribe to (multibase encoded when sent over HTTP RPC)."), }, PreRun: urlArgsEncoder, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { api, err := cmdenv.GetApi(env, req) if err != nil { return err } if err := urlArgsDecoder(req, env); err != nil { return err } topic := req.Arguments[0] sub, err := api.PubSub().Subscribe(req.Context, topic) if err != nil { return err } defer sub.Close() if f, ok := res.(http.Flusher); ok { f.Flush() } for { msg, err := sub.Next(req.Context) if err == io.EOF || err == context.Canceled { return nil } else if err != nil { return err } encoder, _ := mbase.EncoderByName("base64url") psm := pubsubMessage{ Data: encoder.Encode(msg.Data()), From: msg.From().String(), Seqno: encoder.Encode(msg.Seq()), } for _, topic := range msg.Topics() { psm.TopicIDs = append(psm.TopicIDs, encoder.Encode([]byte(topic))) } if err := res.Emit(&psm); err != nil { return err } } }, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, psm *pubsubMessage) error { _, dec, err := mbase.Decode(psm.Data) if err != nil { return err } _, err = w.Write(dec) return err }), "ndpayload": cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, psm *pubsubMessage) error { return errors.New("--enc=ndpayload was removed, use --enc=json instead") }), "lenpayload": cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, psm *pubsubMessage) error { return errors.New("--enc=lenpayload was removed, use --enc=json instead") }), }, Type: pubsubMessage{}, }
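A hedged sketch of the CoreAPI PubSub calls these commands wrap, assuming a coreiface.CoreAPI and a daemon started with '--enable-pubsub-experiment'; the topic name and helper are illustrative.

// Illustrative helper: publish one message and read one message back on the same topic.
func pubsubRoundTrip(ctx context.Context, api coreiface.CoreAPI) error {
    sub, err := api.PubSub().Subscribe(ctx, "example-topic")
    if err != nil {
        return err
    }
    defer sub.Close()
    if err := api.PubSub().Publish(ctx, "example-topic", []byte("hello")); err != nil {
        return err
    }
    msg, err := sub.Next(ctx)
    if err != nil {
        return err
    }
    fmt.Printf("from %s: %s\n", msg.From(), msg.Data())
    return nil
}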
var RefsCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "List links (references) from an object.", ShortDescription: ` Lists the hashes of all the links an IPFS or IPNS object(s) contains, with the following format: <link base58 hash> List all references recursively by using the flag '-r'. NOTE: Like most other commands, Kubo will try to fetch the blocks of the passed path if they can't be found in the local store if it is running in online mode. `, }, Subcommands: map[string]*cmds.Command{ "local": RefsLocalCmd, }, Arguments: []cmds.Argument{ cmds.StringArg("ipfs-path", true, true, "Path to the object(s) to list refs from.").EnableStdin(), }, Options: []cmds.Option{ cmds.StringOption(refsFormatOptionName, "Emit edges with given format. Available tokens: <src> <dst> <linkname>.").WithDefault("<dst>"), cmds.BoolOption(refsEdgesOptionName, "e", "Emit edge format: `<from> -> <to>`."), cmds.BoolOption(refsUniqueOptionName, "u", "Omit duplicate refs from output."), cmds.BoolOption(refsRecursiveOptionName, "r", "Recursively list links of child nodes."), cmds.IntOption(refsMaxDepthOptionName, "Only for recursive refs, limits fetch and listing to the given depth").WithDefault(-1), }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { err := req.ParseBodyArgs() if err != nil { return err } ctx := req.Context api, err := cmdenv.GetApi(env, req) if err != nil { return err } enc, err := cmdenv.GetCidEncoder(req) if err != nil { return err } unique, _ := req.Options[refsUniqueOptionName].(bool) recursive, _ := req.Options[refsRecursiveOptionName].(bool) maxDepth, _ := req.Options[refsMaxDepthOptionName].(int) edges, _ := req.Options[refsEdgesOptionName].(bool) format, _ := req.Options[refsFormatOptionName].(string) if !recursive { maxDepth = 1 } if edges { if format != "<dst>" { return errors.New("using format argument with edges is not allowed") } format = "<src> -> <dst>" } objs, err := objectsForPaths(ctx, api, req.Arguments) if err != nil { return err } rw := RefWriter{ res: res, DAG: merkledag.NewSession(ctx, api.Dag()), Ctx: ctx, Unique: unique, PrintFmt: format, MaxDepth: maxDepth, } for _, o := range objs { if _, err := rw.WriteRefs(o, enc); err != nil { if err := res.Emit(&RefWrapper{Err: err.Error()}); err != nil { return err } } } return nil }, Encoders: refsEncoderMap, Type: RefWrapper{}, }
RefsCmd is the `ipfs refs` command
var RefsLocalCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "List all local references.", ShortDescription: ` Displays the hashes of all local objects. NOTE: This treats all local objects as "raw blocks" and returns CIDv1-Raw CIDs. `, }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { ctx := req.Context n, err := cmdenv.GetNode(env) if err != nil { return err } allKeys, err := n.Blockstore.AllKeysChan(ctx) if err != nil { return err } for k := range allKeys { err := res.Emit(&RefWrapper{Ref: k.String()}) if err != nil { return err } } return nil }, Encoders: refsEncoderMap, Type: RefWrapper{}, }
var RemovedDHTCmd = &cmds.Command{
    Status: cmds.Removed,
    Helptext: cmds.HelpText{
        Tagline: "Removed, use 'ipfs routing' instead.",
    },
    Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
        return errors.New("removed, use 'ipfs routing' instead")
    },
}
var RepoCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Manipulate the IPFS repo.", ShortDescription: ` 'ipfs repo' is a plumbing command used to manipulate the repo. `, }, Subcommands: map[string]*cmds.Command{ "stat": repoStatCmd, "gc": repoGcCmd, "version": repoVersionCmd, "verify": repoVerifyCmd, "migrate": repoMigrateCmd, "ls": RefsLocalCmd, }, }
var ResolveCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Resolve the value of names to IPFS.", ShortDescription: ` There are a number of mutable name protocols that can link among themselves and into IPNS. This command accepts any of these identifiers and resolves them to the referenced item. `, LongDescription: ` There are a number of mutable name protocols that can link among themselves and into IPNS. For example IPNS references can (currently) point at an IPFS object, and DNS links can point at other DNS links, IPNS entries, or IPFS objects. This command accepts any of these identifiers and resolves them to the referenced item. EXAMPLES Resolve the value of your identity: $ ipfs resolve /ipns/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy /ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj Resolve the value of another name: $ ipfs resolve /ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n /ipns/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy Resolve the value of another name recursively: $ ipfs resolve -r /ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n /ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj Resolve the value of an IPFS DAG path: $ ipfs resolve /ipfs/QmeZy1fGbwgVSrqbfh9fKQrAWgeyRnj7h8fsHS1oy3k99x/beep/boop /ipfs/QmYRMjyvAiHKN9UTi8Bzt1HUspmSRD8T8DwxfSMzLgBon1 `, }, Arguments: []cmds.Argument{ cmds.StringArg("name", true, false, "The name to resolve.").EnableStdin(), }, Options: []cmds.Option{ cmds.BoolOption(resolveRecursiveOptionName, "r", "Resolve until the result is an IPFS name.").WithDefault(true), cmds.IntOption(resolveDhtRecordCountOptionName, "dhtrc", "Number of records to request for DHT resolution."), cmds.StringOption(resolveDhtTimeoutOptionName, "dhtt", "Max time to collect values during DHT resolution e.g. \"30s\". Pass 0 for no timeout."), }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { api, err := cmdenv.GetApi(env, req) if err != nil { return err } name := req.Arguments[0] recursive, _ := req.Options[resolveRecursiveOptionName].(bool) if strings.HasPrefix(name, "/ipns/") && !recursive { rc, rcok := req.Options[resolveDhtRecordCountOptionName].(uint) dhtt, dhttok := req.Options[resolveDhtTimeoutOptionName].(string) ropts := []options.NameResolveOption{ options.Name.ResolveOption(ns.ResolveWithDepth(1)), } if rcok { ropts = append(ropts, options.Name.ResolveOption(ns.ResolveWithDhtRecordCount(rc))) } if dhttok { d, err := time.ParseDuration(dhtt) if err != nil { return err } if d < 0 { return errors.New("DHT timeout value must be >= 0") } ropts = append(ropts, options.Name.ResolveOption(ns.ResolveWithDhtTimeout(d))) } p, err := api.Name().Resolve(req.Context, name, ropts...) if err != nil && err != ns.ErrResolveRecursion { return err } return cmds.EmitOnce(res, &ncmd.ResolvedPath{Path: p.String()}) } var enc cidenc.Encoder switch { case !cmdenv.CidBaseDefined(req) && !strings.HasPrefix(name, "/ipns/"): enc, err = cmdenv.CidEncoderFromPath(name) if err == nil { break } fallthrough default: enc, err = cmdenv.GetCidEncoder(req) if err != nil { return err } } p, err := cmdutils.PathOrCidPath(name) if err != nil { return err } rp, remainder, err := api.ResolvePath(req.Context, p) if err != nil { return err } encodedPath := "/" + rp.Namespace() + "/" + enc.Encode(rp.RootCid()) if len(remainder) != 0 { encodedPath += path.SegmentsToString(remainder...) } ep, err := path.NewPath(encodedPath) if err != nil { return err } return cmds.EmitOnce(res, &ncmd.ResolvedPath{Path: ep.String()}) }, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, rp *ncmd.ResolvedPath) error { fmt.Fprintln(w, rp.Path) return nil }), }, Type: ncmd.ResolvedPath{}, }
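The Run function ultimately emits a ResolvedPath with a single Path field. As a hedged sketch, a client could obtain the same result over the daemon's HTTP RPC; this assumes the default API address (/ip4/127.0.0.1/tcp/5001) and that the JSON response carries a "Path" field mirroring ncmd.ResolvedPath.

```go
// Hedged sketch: resolving a name through the daemon's HTTP RPC.
// The endpoint path and the "Path" response field are assumptions based on
// the ResolvedPath output type shown above; the name is the helptext example.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	name := "/ipns/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy"
	endpoint := "http://127.0.0.1:5001/api/v0/resolve?arg=" + url.QueryEscape(name)

	// Kubo's RPC API expects POST requests.
	resp, err := http.Post(endpoint, "", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out struct {
		Path string
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println("resolved to:", out.Path)
}
```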
var Root = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Global p2p merkle-dag filesystem.", Synopsis: "ipfs [--config=<config> | -c] [--debug | -D] [--help] [-h] [--api=<api>] [--offline] [--cid-base=<base>] [--upgrade-cidv0-in-output] [--encoding=<encoding> | --enc] [--timeout=<timeout>] <command> ...", Subcommands: ` BASIC COMMANDS init Initialize local IPFS configuration add <path> Add a file to IPFS cat <ref> Show IPFS object data get <ref> Download IPFS objects ls <ref> List links from an object refs <ref> List hashes of links from an object DATA STRUCTURE COMMANDS dag Interact with IPLD DAG nodes files Interact with files as if they were a unix filesystem block Interact with raw blocks in the datastore TEXT ENCODING COMMANDS cid Convert and discover properties of CIDs multibase Encode and decode data with Multibase format ADVANCED COMMANDS daemon Start a long-running daemon process shutdown Shut down the daemon process resolve Resolve any type of content path name Publish and resolve IPNS names key Create and list IPNS name keypairs pin Pin objects to local storage repo Manipulate the IPFS repository stats Various operational stats p2p Libp2p stream mounting (experimental) filestore Manage the filestore (experimental) mount Mount an IPFS read-only mount point (experimental) NETWORK COMMANDS id Show info about IPFS peers bootstrap Add or remove bootstrap peers swarm Manage connections to the p2p network dht Query the DHT for values or peers routing Issue routing commands ping Measure the latency of a connection bitswap Inspect bitswap state pubsub Send and receive messages via pubsub TOOL COMMANDS config Manage configuration version Show IPFS version information diag Generate diagnostic reports update Download and apply go-ipfs updates commands List all available commands log Manage and show logs of running daemon Use 'ipfs <command> --help' to learn more about each command. ipfs uses a repository in the local file system. By default, the repo is located at ~/.ipfs. To change the repo location, set the $IPFS_PATH environment variable: export IPFS_PATH=/path/to/ipfsrepo EXIT STATUS The CLI will exit with one of the following values: 0 Successful execution. 1 Failed executions. `, }, Options: []cmds.Option{ cmds.StringOption(RepoDirOption, "Path to the repository directory to use."), cmds.StringOption(ConfigFileOption, "Path to the configuration file to use."), cmds.StringOption(ConfigOption, "c", "[DEPRECATED] Path to the configuration file to use."), cmds.BoolOption(DebugOption, "D", "Operate in debug mode."), cmds.BoolOption(cmds.OptLongHelp, "Show the full command help text."), cmds.BoolOption(cmds.OptShortHelp, "Show a short version of the command help text."), cmds.BoolOption(LocalOption, "L", "Run the command locally, instead of using the daemon. DEPRECATED: use --offline."), cmds.BoolOption(OfflineOption, "Run the command offline."), cmds.StringOption(ApiOption, "Use a specific API instance (defaults to /ip4/127.0.0.1/tcp/5001)"), cmds.StringOption(ApiAuthOption, "Optional RPC API authorization secret (defined as AuthSecret in API.Authorizations config)"), cmdenv.OptionCidBase, cmdenv.OptionUpgradeCidV0InOutput, cmds.OptionEncodingType, cmds.OptionStreamChannels, cmds.OptionTimeout, }, }
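The root help text notes that the repo location is taken from the $IPFS_PATH environment variable. A small hedged sketch of pointing the CLI at a throwaway repo that way follows; it assumes an `ipfs` binary on PATH and uses a temporary directory purely for illustration.

```go
// Hedged sketch: using the $IPFS_PATH variable documented in the root help
// text to run the CLI against a custom repo directory. Assumes an `ipfs`
// binary on PATH; the temp directory is a throwaway.
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	repoDir, err := os.MkdirTemp("", "ipfs-repo-")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(repoDir)

	cmd := exec.Command("ipfs", "init")
	cmd.Env = append(os.Environ(), "IPFS_PATH="+repoDir)
	out, err := cmd.CombinedOutput()
	if err != nil {
		panic(fmt.Errorf("ipfs init failed: %w\n%s", err, out))
	}
	fmt.Printf("initialized repo in %s\n%s", repoDir, out)
}
```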
var RoutingCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Issue routing commands.", ShortDescription: ``, }, Subcommands: map[string]*cmds.Command{ "findprovs": findProvidersRoutingCmd, "findpeer": findPeerRoutingCmd, "get": getValueRoutingCmd, "put": putValueRoutingCmd, "provide": provideRefRoutingCmd, }, }
var StatsCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Query IPFS statistics.", ShortDescription: `'ipfs stats' is a set of commands to help look at statistics for your IPFS node. `, LongDescription: `'ipfs stats' is a set of commands to help look at statistics for your IPFS node.`, }, Subcommands: map[string]*cmds.Command{ "bw": statBwCmd, "repo": repoStatCmd, "bitswap": bitswapStatCmd, "dht": statDhtCmd, "provide": statProvideCmd, }, }
var SwarmCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Interact with the swarm.", ShortDescription: ` 'ipfs swarm' is a tool to manipulate the network swarm. The swarm is the component that opens, listens for, and maintains connections to other ipfs peers in the internet. `, }, Subcommands: map[string]*cmds.Command{ "addrs": swarmAddrsCmd, "connect": swarmConnectCmd, "disconnect": swarmDisconnectCmd, "filters": swarmFiltersCmd, "peers": swarmPeersCmd, "peering": swarmPeeringCmd, "resources": swarmResourcesCmd, }, }
var VersionCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Show IPFS version information.", ShortDescription: "Returns the current version of IPFS and exits.", }, Subcommands: map[string]*cmds.Command{ "deps": depsVersionCommand, "check": checkVersionCommand, }, Options: []cmds.Option{ cmds.BoolOption(versionNumberOptionName, "n", "Only show the version number."), cmds.BoolOption(versionCommitOptionName, "Show the commit hash."), cmds.BoolOption(versionRepoOptionName, "Show repo version."), cmds.BoolOption(versionAllOptionName, "Show all version information"), }, Extra: CreateCmdExtras(SetDoesNotUseRepo(true), SetDoesNotUseConfigAsInput(true)), Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { return cmds.EmitOnce(res, version.GetVersionInfo()) }, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, version *version.VersionInfo) error { all, _ := req.Options[versionAllOptionName].(bool) if all { ver := version.Version if version.Commit != "" { ver += "-" + version.Commit } out := fmt.Sprintf("Kubo version: %s\n"+ "Repo version: %s\nSystem version: %s\nGolang version: %s\n", ver, version.Repo, version.System, version.Golang) fmt.Fprint(w, out) return nil } commit, _ := req.Options[versionCommitOptionName].(bool) commitTxt := "" if commit && version.Commit != "" { commitTxt = "-" + version.Commit } repo, _ := req.Options[versionRepoOptionName].(bool) if repo { fmt.Fprintln(w, version.Repo) return nil } number, _ := req.Options[versionNumberOptionName].(bool) if number { fmt.Fprintln(w, version.Version+commitTxt) return nil } fmt.Fprintf(w, "ipfs version %s%s\n", version.Version, commitTxt) return nil }), }, Type: version.VersionInfo{}, }
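The encoder above prints different subsets of version.VersionInfo depending on --all, --commit, --repo and -n. As a hedged sketch, the same information can be fetched over the daemon's HTTP RPC and formatted client-side; the /api/v0/version endpoint and the JSON field names are assumptions based on the VersionInfo fields the encoder reads.

```go
// Hedged sketch: reading version information over the RPC and printing it
// roughly like `ipfs version --all`. The endpoint path and field names are
// assumptions mirroring the fields used by the encoder above.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

type versionInfo struct {
	Version string
	Commit  string
	Repo    string
	System  string
	Golang  string
}

func main() {
	resp, err := http.Post("http://127.0.0.1:5001/api/v0/version", "", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var v versionInfo
	if err := json.NewDecoder(resp.Body).Decode(&v); err != nil {
		panic(err)
	}

	ver := v.Version
	if v.Commit != "" {
		ver += "-" + v.Commit
	}
	fmt.Printf("Kubo version: %s\nRepo version: %s\nSystem version: %s\nGolang version: %s\n",
		ver, v.Repo, v.System, v.Golang)
}
```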
Functions ¶
func CommandsCmd ¶
CommandsCmd takes in a root command, and returns a command that lists the subcommands in that root
func CreateCmdExtras ¶ added in v0.14.0
func DaemonNotRunning ¶ added in v0.14.0
func DaemonNotRunning(req *cmds.Request, env cmds.Environment) error
DaemonNotRunning checks whether the ipfs repo is locked, which indicates that the daemon is running, and returns an error if it is.
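A hedged sketch of the same idea follows: treat a locked repo as "daemon running". Whether DaemonNotRunning uses exactly this helper is an assumption; the fsrepo.LockedByOtherProcess call, the error message, and the repo path are illustrative only.

```go
// Hedged sketch (not the command's own code): detect a running daemon by
// checking whether the repo is locked by another process.
package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"

	"github.com/ipfs/kubo/repo/fsrepo"
)

func daemonNotRunning(repoPath string) error {
	locked, err := fsrepo.LockedByOtherProcess(repoPath)
	if err != nil {
		return err
	}
	if locked {
		// Illustrative message; the real command words this differently.
		return errors.New("ipfs daemon appears to be running; stop it first")
	}
	return nil
}

func main() {
	home, _ := os.UserHomeDir()
	repoPath := filepath.Join(home, ".ipfs") // default repo location
	if err := daemonNotRunning(repoPath); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("no daemon detected; safe to use the repo directly")
}
```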
func ExternalBinary ¶ added in v0.3.10
func GetDoesNotUseConfigAsInput ¶ added in v0.14.0
func GetDoesNotUseRepo ¶ added in v0.14.0
func GetPreemptsAutoUpdate ¶ added in v0.14.0
func SetDoesNotUseConfigAsInput ¶ added in v0.14.0
func SetDoesNotUseRepo ¶ added in v0.14.0
func SetPreemptsAutoUpdate ¶ added in v0.14.0
Types ¶
type BootstrapOutput ¶
type BootstrapOutput struct {
Peers []string
}
type CidFormatRes ¶ added in v0.4.18
type CodeAndName ¶ added in v0.4.18
type ConfigField ¶
type ConfigField struct { Key string Value interface{} }
type ConfigUpdateOutput ¶ added in v0.4.18
ConfigUpdateOutput is the output of the 'config profile apply' command.
type Dependency ¶ added in v0.14.0
type KeyOutputList ¶ added in v0.4.5
type KeyOutputList struct {
Keys []KeyOutput
}
type KeyRenameOutput ¶ added in v0.4.10
KeyRenameOutput defines the output type of keyRenameCmd.
type KeySignOutput ¶ added in v0.25.0
type KeyVerifyOutput ¶ added in v0.25.0
type LsLink ¶ added in v0.3.2
type LsLink struct {
Name, Hash string
Size uint64
Type unixfs_pb.Data_DataType
Target string
Mode os.FileMode
ModTime time.Time
}
LsLink contains printable data for a single ipld link in ls output
type LsObject ¶ added in v0.3.2
LsObject is an element of LsOutput. It can represent all or part of a directory.
type LsOutput ¶
type LsOutput struct {
Objects []LsObject
}
LsOutput is a set of printable data for directories; it can be complete or partial.
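As a hedged sketch, `ipfs ls --enc=json` output can be decoded into local structs mirroring the fields shown above. The lsObject fields below (Hash, Links) are an assumption, since this page does not list LsObject's fields, and the sample JSON is made up.

```go
// Hedged sketch: decoding `ipfs ls --enc=json` output. lsLink mirrors the
// documented LsLink fields; lsObject's Hash/Links fields are assumptions,
// and the sample JSON is fabricated for illustration.
package main

import (
	"encoding/json"
	"fmt"
)

type lsLink struct {
	Name string
	Hash string
	Size uint64
	Type int32 // unixfs_pb.Data_DataType is an enum; plain int32 here
}

type lsObject struct {
	Hash  string   // assumed field
	Links []lsLink // assumed field
}

type lsOutput struct {
	Objects []lsObject
}

func main() {
	raw := []byte(`{"Objects":[{"Hash":"QmDirPlaceholder","Links":[{"Name":"readme.md","Hash":"QmFilePlaceholder","Size":1234,"Type":2}]}]}`)

	var out lsOutput
	if err := json.Unmarshal(raw, &out); err != nil {
		panic(err)
	}
	for _, obj := range out.Objects {
		for _, l := range obj.Links {
			fmt.Printf("%s  %s  %d bytes\n", l.Hash, l.Name, l.Size)
		}
	}
}
```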
type MessageOutput ¶
type MessageOutput struct {
Message string
}
type P2PListenerInfoOutput ¶ added in v0.4.10
P2PListenerInfoOutput is the output type of the ls command.
type P2PLsOutput ¶ added in v0.4.10
type P2PLsOutput struct {
Listeners []P2PListenerInfoOutput
}
P2PLsOutput is the output type of the ls command.
type P2PStreamInfoOutput ¶ added in v0.4.10
type P2PStreamInfoOutput struct { HandlerID string Protocol string OriginAddress string TargetAddress string }
P2PStreamInfoOutput is the output type of the streams command.
type P2PStreamsOutput ¶ added in v0.4.10
type P2PStreamsOutput struct {
Streams []P2PStreamInfoOutput
}
P2PStreamsOutput is the output type of the streams command.
type RefWrapper ¶ added in v0.3.5
type RefWriter ¶
type RefWriter struct { DAG ipld.NodeGetter Ctx context.Context Unique bool MaxDepth int PrintFmt string // contains filtered or unexported fields }
type RepoVersion ¶ added in v0.4.3
type RepoVersion struct {
Version string
}
type TimeParts ¶ added in v0.30.0
type TimeParts struct {
// contains filtered or unexported fields
}
func (TimeParts) MarshalJSON ¶ added in v0.30.0
type VerifyProgress ¶ added in v0.4.3
type VersionCheckOutput ¶ added in v0.30.0
type VersionCheckOutput struct { UpdateAvailable bool RunningVersion string GreatestVersion string PeersSampled int WithGreaterVersion int }
func DetectNewKuboVersion ¶ added in v0.30.0
func DetectNewKuboVersion(nd *core.IpfsNode, minPercent int64) (VersionCheckOutput, error)
DetectNewKuboVersion observes the Kubo version reported by other peers via the libp2p identify protocol and notifies when a threshold fraction of the seen swarm is running a newer Kubo. It is used by the RPC and CLI via 'ipfs version check' and also runs periodically while 'ipfs daemon' is running.
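A hedged sketch of that threshold arithmetic follows, operating on a VersionCheckOutput-shaped value. The exact comparison DetectNewKuboVersion performs is an assumption; this only illustrates checking whether the observed fraction clears a minimum percentage.

```go
// Hedged sketch of the threshold check described above: given a sample
// shaped like VersionCheckOutput, decide whether "enough" of the observed
// swarm reports a newer Kubo. minPercent mirrors the minPercent parameter
// of DetectNewKuboVersion; the comparison itself is an assumption.
package main

import "fmt"

type versionCheckOutput struct {
	UpdateAvailable    bool
	RunningVersion     string
	GreatestVersion    string
	PeersSampled       int
	WithGreaterVersion int
}

func updateLikelyAvailable(o versionCheckOutput, minPercent int64) bool {
	if o.PeersSampled == 0 {
		return false
	}
	frac := float64(o.WithGreaterVersion) / float64(o.PeersSampled)
	return frac >= float64(minPercent)/100.0
}

func main() {
	sample := versionCheckOutput{
		RunningVersion:     "0.30.0",
		GreatestVersion:    "0.31.0",
		PeersSampled:       200,
		WithGreaterVersion: 23,
	}
	// 23/200 = 11.5%, which clears a 5% threshold.
	fmt.Println(updateLikelyAvailable(sample, 5))
}
```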
Source Files ¶
- active.go
- add.go
- bitswap.go
- block.go
- bootstrap.go
- cat.go
- cid.go
- commands.go
- completion.go
- config.go
- dht.go
- diag.go
- external.go
- extra.go
- files.go
- filestore.go
- get.go
- id.go
- keystore.go
- log.go
- ls.go
- mount_unix.go
- multibase.go
- p2p.go
- ping.go
- profile.go
- pubsub.go
- refs.go
- repo.go
- resolve.go
- root.go
- routing.go
- shutdown.go
- stat.go
- stat_dht.go
- stat_provide.go
- swarm.go
- sysdiag.go
- version.go