D9999: sshpeer: enable+fix warning about sshpeers not being closed explicitly
valentin.gatienbaron (Valentin Gatien-Baron)
phabricator at mercurial-scm.org
Mon Feb 15 21:46:05 UTC 2021
valentin.gatienbaron created this revision.
Herald added a reviewer: durin42.
Herald added a reviewer: martinvonz.
Herald added a reviewer: hg-reviewers.
Herald added a subscriber: mercurial-patches.
REVISION SUMMARY
I recommend reviewing this with a diff that ignores indentation changes.
The test changes are because localrepo.close() updates some caches,
which now appears to happen earlier on rollbacks and strips.
The http test changes are because httppeer.close() prints transfer stats
with --verbose.
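For reviewers: the mechanical change repeated throughout the diff is that every
call site obtaining a peer (hg.peer() and similar helpers) now releases it
explicitly in a try/finally, so the develwarn enabled in sshpeer.py no longer
fires. A minimal sketch of the pattern, assuming a command/extension context
where ui, opts and path are already in scope (not a standalone script):

    from mercurial import hg

    def use_peer(ui, opts, path):
        # may return an sshpeer, httppeer, or localpeer
        peer = hg.peer(ui, opts, path)
        try:
            # ... any wire-protocol calls go here ...
            return peer.capabilities()
        finally:
            # Explicit close; without it, sshpeer now emits
            # "missing close on SSH connection created at: ..."
            # when devel warnings are enabled.
            peer.close()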
REPOSITORY
rHG Mercurial
BRANCH
default
REVISION DETAIL
https://phab.mercurial-scm.org/D9999
AFFECTED FILES
hgext/infinitepush/__init__.py
hgext/narrow/narrowcommands.py
mercurial/commands.py
mercurial/debugcommands.py
mercurial/hg.py
mercurial/revset.py
mercurial/sshpeer.py
mercurial/subrepo.py
tests/remotefilelog-getflogheads.py
tests/test-acl.t
tests/test-http.t
tests/test-lfs-serve.t
CHANGE DETAILS
diff --git a/tests/test-lfs-serve.t b/tests/test-lfs-serve.t
--- a/tests/test-lfs-serve.t
+++ b/tests/test-lfs-serve.t
@@ -462,6 +462,7 @@
remote: adding manifests
remote: adding file changes
remote: added 1 changesets with 1 changes to 1 files
+ (sent 8 HTTP requests and 3526 bytes; received 961 bytes in responses) (?)
$ grep 'lfs' .hg/requires $SERVER_REQUIRES
.hg/requires:lfs
$TESTTMP/server/.hg/requires:lfs
diff --git a/tests/test-http.t b/tests/test-http.t
--- a/tests/test-http.t
+++ b/tests/test-http.t
@@ -382,6 +382,7 @@
devel-peer-request: 16 bytes of commands arguments in headers
devel-peer-request: finished in *.???? seconds (200) (glob)
received listkey for "phases": 15 bytes
+ (sent 9 HTTP requests and 3898 bytes; received 920 bytes in responses)
$ hg rollback -q
$ sed 's/.*] "/"/' < ../access.log
diff --git a/tests/test-acl.t b/tests/test-acl.t
--- a/tests/test-acl.t
+++ b/tests/test-acl.t
@@ -361,6 +361,7 @@
bundle2-input-bundle: 5 parts total
transaction abort!
rollback completed
+ truncating cache/rbc-revs-v1 to 8
abort: acl: user "fred" not allowed on "foo/file.txt" (changeset "ef1ea85a6374")
no rollback information available
0:6675d58eff77
@@ -808,7 +809,6 @@
acl: acl.deny.bookmarks not enabled
acl: bookmark access granted: "ef1ea85a6374b77d6da9dcda9541f498f2d17df7" on bookmark "moving-bookmark"
bundle2-input-bundle: 7 parts total
- truncating cache/rbc-revs-v1 to 8
updating the branch cache
invalid branch cache (served.hidden): tip differs
added 1 changesets with 1 changes to 1 files
@@ -900,6 +900,7 @@
bundle2-input-bundle: 7 parts total
transaction abort!
rollback completed
+ truncating cache/rbc-revs-v1 to 8
abort: acl: user "fred" denied on bookmark "moving-bookmark" (changeset "ef1ea85a6374b77d6da9dcda9541f498f2d17df7")
no rollback information available
0:6675d58eff77
@@ -985,7 +986,6 @@
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size 24
bundle2-input-bundle: 5 parts total
- truncating cache/rbc-revs-v1 to 8
updating the branch cache
added 3 changesets with 3 changes to 3 files
bundle2-output-bundle: "HG20", 1 parts total
@@ -1073,6 +1073,7 @@
bundle2-input-bundle: 5 parts total
transaction abort!
rollback completed
+ truncating cache/rbc-revs-v1 to 8
abort: acl: user "wilma" not allowed on "quux/file.py" (changeset "911600dab2ae")
no rollback information available
0:6675d58eff77
@@ -1322,7 +1323,6 @@
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size 24
bundle2-input-bundle: 5 parts total
- truncating cache/rbc-revs-v1 to 8
updating the branch cache
added 3 changesets with 3 changes to 3 files
bundle2-output-bundle: "HG20", 1 parts total
@@ -1499,6 +1499,7 @@
bundle2-input-bundle: 5 parts total
transaction abort!
rollback completed
+ truncating cache/rbc-revs-v1 to 8
abort: acl: user "fred" denied on "foo/Bar/file.txt" (changeset "f9cafe1212c8")
no rollback information available
0:6675d58eff77
@@ -1583,7 +1584,6 @@
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size 24
bundle2-input-bundle: 5 parts total
- truncating cache/rbc-revs-v1 to 8
updating the branch cache
added 3 changesets with 3 changes to 3 files
bundle2-output-bundle: "HG20", 1 parts total
@@ -1671,6 +1671,7 @@
bundle2-input-bundle: 5 parts total
transaction abort!
rollback completed
+ truncating cache/rbc-revs-v1 to 8
abort: acl: user "fred" denied on "foo/Bar/file.txt" (changeset "f9cafe1212c8")
no rollback information available
0:6675d58eff77
diff --git a/tests/remotefilelog-getflogheads.py b/tests/remotefilelog-getflogheads.py
--- a/tests/remotefilelog-getflogheads.py
+++ b/tests/remotefilelog-getflogheads.py
@@ -21,7 +21,10 @@
dest = repo.ui.expandpath(b'default')
peer = hg.peer(repo, {}, dest)
- flogheads = peer.x_rfl_getflogheads(path)
+ try:
+ flogheads = peer.x_rfl_getflogheads(path)
+ finally:
+ peer.close()
if flogheads:
for head in flogheads:
diff --git a/mercurial/subrepo.py b/mercurial/subrepo.py
--- a/mercurial/subrepo.py
+++ b/mercurial/subrepo.py
@@ -716,13 +716,17 @@
_(b'sharing subrepo %s from %s\n')
% (subrelpath(self), srcurl)
)
- shared = hg.share(
- self._repo._subparent.baseui,
- getpeer(),
- self._repo.root,
- update=False,
- bookmarks=False,
- )
+ peer = getpeer()
+ try:
+ shared = hg.share(
+ self._repo._subparent.baseui,
+ peer,
+ self._repo.root,
+ update=False,
+ bookmarks=False,
+ )
+ finally:
+ peer.close()
self._repo = shared.local()
else:
# TODO: find a common place for this and this code in the
@@ -743,14 +747,18 @@
_(b'cloning subrepo %s from %s\n')
% (subrelpath(self), util.hidepassword(srcurl))
)
- other, cloned = hg.clone(
- self._repo._subparent.baseui,
- {},
- getpeer(),
- self._repo.root,
- update=False,
- shareopts=shareopts,
- )
+ peer = getpeer()
+ try:
+ other, cloned = hg.clone(
+ self._repo._subparent.baseui,
+ {},
+ peer,
+ self._repo.root,
+ update=False,
+ shareopts=shareopts,
+ )
+ finally:
+ peer.close()
self._repo = cloned.local()
self._initrepo(parentrepo, source, create=True)
self._cachestorehash(srcurl)
@@ -760,7 +768,11 @@
% (subrelpath(self), util.hidepassword(srcurl))
)
cleansub = self.storeclean(srcurl)
- exchange.pull(self._repo, getpeer())
+ peer = getpeer()
+ try:
+ exchange.pull(self._repo, peer)
+ finally:
+ peer.close()
if cleansub:
# keep the repo clean after pull
self._cachestorehash(srcurl)
@@ -845,7 +857,10 @@
% (subrelpath(self), util.hidepassword(dsturl))
)
other = hg.peer(self._repo, {b'ssh': ssh}, dsturl)
- res = exchange.push(self._repo, other, force, newbranch=newbranch)
+ try:
+ res = exchange.push(self._repo, other, force, newbranch=newbranch)
+ finally:
+ other.close()
# the repo is now clean
self._cachestorehash(dsturl)
diff --git a/mercurial/sshpeer.py b/mercurial/sshpeer.py
--- a/mercurial/sshpeer.py
+++ b/mercurial/sshpeer.py
@@ -175,10 +175,7 @@
# to deadlocks due to a peer get gc'ed in a fork
# We add our own stack trace, because the stacktrace when called
# from __del__ is useless.
- if False: # enabled in next commit
- ui.develwarn(
- b'missing close on SSH connection created at:\n%s' % warn
- )
+ ui.develwarn(b'missing close on SSH connection created at:\n%s' % warn)
def _makeconnection(ui, sshcmd, args, remotecmd, path, sshenv=None):
diff --git a/mercurial/revset.py b/mercurial/revset.py
--- a/mercurial/revset.py
+++ b/mercurial/revset.py
@@ -1841,9 +1841,12 @@
if revs:
revs = [repo.lookup(rev) for rev in revs]
other = hg.peer(repo, {}, dest)
- repo.ui.pushbuffer()
- outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
- repo.ui.popbuffer()
+ try:
+ repo.ui.pushbuffer()
+ outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
+ repo.ui.popbuffer()
+ finally:
+ other.close()
cl = repo.changelog
o = {cl.rev(r) for r in outgoing.missing}
return subset & o
diff --git a/mercurial/hg.py b/mercurial/hg.py
--- a/mercurial/hg.py
+++ b/mercurial/hg.py
@@ -678,140 +678,148 @@
srcpeer = source.peer() # in case we were called with a localrepo
branches = (None, branch or [])
origsource = source = srcpeer.url()
- revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
-
- if dest is None:
- dest = defaultdest(source)
- if dest:
- ui.status(_(b"destination directory: %s\n") % dest)
- else:
- dest = ui.expandpath(dest)
-
- dest = util.urllocalpath(dest)
- source = util.urllocalpath(source)
-
- if not dest:
- raise error.InputError(_(b"empty destination path is not valid"))
-
- destvfs = vfsmod.vfs(dest, expandpath=True)
- if destvfs.lexists():
- if not destvfs.isdir():
- raise error.InputError(_(b"destination '%s' already exists") % dest)
- elif destvfs.listdir():
- raise error.InputError(_(b"destination '%s' is not empty") % dest)
-
- createopts = {}
- narrow = False
-
- if storeincludepats is not None:
- narrowspec.validatepatterns(storeincludepats)
- narrow = True
-
- if storeexcludepats is not None:
- narrowspec.validatepatterns(storeexcludepats)
- narrow = True
-
- if narrow:
- # Include everything by default if only exclusion patterns defined.
- if storeexcludepats and not storeincludepats:
- storeincludepats = {b'path:.'}
-
- createopts[b'narrowfiles'] = True
-
- if depth:
- createopts[b'shallowfilestore'] = True
-
- if srcpeer.capable(b'lfs-serve'):
- # Repository creation honors the config if it disabled the extension, so
- # we can't just announce that lfs will be enabled. This check avoids
- # saying that lfs will be enabled, and then saying it's an unknown
- # feature. The lfs creation option is set in either case so that a
- # requirement is added. If the extension is explicitly disabled but the
- # requirement is set, the clone aborts early, before transferring any
- # data.
- createopts[b'lfs'] = True
-
- if extensions.disabled_help(b'lfs'):
- ui.status(
- _(
- b'(remote is using large file support (lfs), but it is '
- b'explicitly disabled in the local configuration)\n'
+ srclock = destlock = cleandir = None
+ destpeer = None
+ try:
+ revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
+
+ if dest is None:
+ dest = defaultdest(source)
+ if dest:
+ ui.status(_(b"destination directory: %s\n") % dest)
+ else:
+ dest = ui.expandpath(dest)
+
+ dest = util.urllocalpath(dest)
+ source = util.urllocalpath(source)
+
+ if not dest:
+ raise error.InputError(_(b"empty destination path is not valid"))
+
+ destvfs = vfsmod.vfs(dest, expandpath=True)
+ if destvfs.lexists():
+ if not destvfs.isdir():
+ raise error.InputError(
+ _(b"destination '%s' already exists") % dest
)
- )
- else:
- ui.status(
- _(
- b'(remote is using large file support (lfs); lfs will '
- b'be enabled for this repository)\n'
+ elif destvfs.listdir():
+ raise error.InputError(
+ _(b"destination '%s' is not empty") % dest
)
- )
-
- shareopts = shareopts or {}
- sharepool = shareopts.get(b'pool')
- sharenamemode = shareopts.get(b'mode')
- if sharepool and islocal(dest):
- sharepath = None
- if sharenamemode == b'identity':
- # Resolve the name from the initial changeset in the remote
- # repository. This returns nullid when the remote is empty. It
- # raises RepoLookupError if revision 0 is filtered or otherwise
- # not available. If we fail to resolve, sharing is not enabled.
- try:
- with srcpeer.commandexecutor() as e:
- rootnode = e.callcommand(
- b'lookup',
- {
- b'key': b'0',
- },
- ).result()
-
- if rootnode != nullid:
- sharepath = os.path.join(sharepool, hex(rootnode))
- else:
+
+ createopts = {}
+ narrow = False
+
+ if storeincludepats is not None:
+ narrowspec.validatepatterns(storeincludepats)
+ narrow = True
+
+ if storeexcludepats is not None:
+ narrowspec.validatepatterns(storeexcludepats)
+ narrow = True
+
+ if narrow:
+ # Include everything by default if only exclusion patterns defined.
+ if storeexcludepats and not storeincludepats:
+ storeincludepats = {b'path:.'}
+
+ createopts[b'narrowfiles'] = True
+
+ if depth:
+ createopts[b'shallowfilestore'] = True
+
+ if srcpeer.capable(b'lfs-serve'):
+ # Repository creation honors the config if it disabled the extension, so
+ # we can't just announce that lfs will be enabled. This check avoids
+ # saying that lfs will be enabled, and then saying it's an unknown
+ # feature. The lfs creation option is set in either case so that a
+ # requirement is added. If the extension is explicitly disabled but the
+ # requirement is set, the clone aborts early, before transferring any
+ # data.
+ createopts[b'lfs'] = True
+
+ if extensions.disabled_help(b'lfs'):
+ ui.status(
+ _(
+ b'(remote is using large file support (lfs), but it is '
+ b'explicitly disabled in the local configuration)\n'
+ )
+ )
+ else:
+ ui.status(
+ _(
+ b'(remote is using large file support (lfs); lfs will '
+ b'be enabled for this repository)\n'
+ )
+ )
+
+ shareopts = shareopts or {}
+ sharepool = shareopts.get(b'pool')
+ sharenamemode = shareopts.get(b'mode')
+ if sharepool and islocal(dest):
+ sharepath = None
+ if sharenamemode == b'identity':
+ # Resolve the name from the initial changeset in the remote
+ # repository. This returns nullid when the remote is empty. It
+ # raises RepoLookupError if revision 0 is filtered or otherwise
+ # not available. If we fail to resolve, sharing is not enabled.
+ try:
+ with srcpeer.commandexecutor() as e:
+ rootnode = e.callcommand(
+ b'lookup',
+ {
+ b'key': b'0',
+ },
+ ).result()
+
+ if rootnode != nullid:
+ sharepath = os.path.join(sharepool, hex(rootnode))
+ else:
+ ui.status(
+ _(
+ b'(not using pooled storage: '
+ b'remote appears to be empty)\n'
+ )
+ )
+ except error.RepoLookupError:
ui.status(
_(
b'(not using pooled storage: '
- b'remote appears to be empty)\n'
+ b'unable to resolve identity of remote)\n'
)
)
- except error.RepoLookupError:
+ elif sharenamemode == b'remote':
+ sharepath = os.path.join(
+ sharepool, hex(hashutil.sha1(source).digest())
+ )
+ else:
+ raise error.Abort(
+ _(b'unknown share naming mode: %s') % sharenamemode
+ )
+
+ # TODO this is a somewhat arbitrary restriction.
+ if narrow:
ui.status(
- _(
- b'(not using pooled storage: '
- b'unable to resolve identity of remote)\n'
- )
+ _(b'(pooled storage not supported for narrow clones)\n')
)
- elif sharenamemode == b'remote':
- sharepath = os.path.join(
- sharepool, hex(hashutil.sha1(source).digest())
- )
- else:
- raise error.Abort(
- _(b'unknown share naming mode: %s') % sharenamemode
- )
-
- # TODO this is a somewhat arbitrary restriction.
- if narrow:
- ui.status(_(b'(pooled storage not supported for narrow clones)\n'))
- sharepath = None
-
- if sharepath:
- return clonewithshare(
- ui,
- peeropts,
- sharepath,
- source,
- srcpeer,
- dest,
- pull=pull,
- rev=revs,
- update=update,
- stream=stream,
- )
-
- srclock = destlock = cleandir = None
- srcrepo = srcpeer.local()
- try:
+ sharepath = None
+
+ if sharepath:
+ return clonewithshare(
+ ui,
+ peeropts,
+ sharepath,
+ source,
+ srcpeer,
+ dest,
+ pull=pull,
+ rev=revs,
+ update=update,
+ stream=stream,
+ )
+
+ srcrepo = srcpeer.local()
+
abspath = origsource
if islocal(origsource):
abspath = os.path.abspath(util.urllocalpath(origsource))
@@ -1052,6 +1060,8 @@
shutil.rmtree(cleandir, True)
if srcpeer is not None:
srcpeer.close()
+ if destpeer and destpeer.local() is None:
+ destpeer.close()
return srcpeer, destpeer
@@ -1253,15 +1263,17 @@
"""
source, branches = parseurl(ui.expandpath(source), opts.get(b'branch'))
other = peer(repo, opts, source)
- ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
- revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
-
- if revs:
- revs = [other.lookup(rev) for rev in revs]
- other, chlist, cleanupfn = bundlerepo.getremotechanges(
- ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
- )
+ cleanupfn = other.close
try:
+ ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
+ revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
+
+ if revs:
+ revs = [other.lookup(rev) for rev in revs]
+ other, chlist, cleanupfn = bundlerepo.getremotechanges(
+ ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
+ )
+
if not chlist:
ui.status(_(b"no changes found\n"))
return subreporecurse()
@@ -1320,13 +1332,17 @@
revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
other = peer(repo, opts, dest)
- outgoing = discovery.findcommonoutgoing(
- repo, other, revs, force=opts.get(b'force')
- )
- o = outgoing.missing
- if not o:
- scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
- return o, other
+ try:
+ outgoing = discovery.findcommonoutgoing(
+ repo, other, revs, force=opts.get(b'force')
+ )
+ o = outgoing.missing
+ if not o:
+ scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
+ return o, other
+ except: # re-raises
+ other.close()
+ raise
def outgoing(ui, repo, dest, opts):
@@ -1341,27 +1357,30 @@
limit = logcmdutil.getlimit(opts)
o, other = _outgoing(ui, repo, dest, opts)
- if not o:
+ try:
+ if not o:
+ cmdutil.outgoinghooks(ui, repo, other, opts, o)
+ return recurse()
+
+ if opts.get(b'newest_first'):
+ o.reverse()
+ ui.pager(b'outgoing')
+ displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
+ count = 0
+ for n in o:
+ if limit is not None and count >= limit:
+ break
+ parents = [p for p in repo.changelog.parents(n) if p != nullid]
+ if opts.get(b'no_merges') and len(parents) == 2:
+ continue
+ count += 1
+ displayer.show(repo[n])
+ displayer.close()
cmdutil.outgoinghooks(ui, repo, other, opts, o)
- return recurse()
-
- if opts.get(b'newest_first'):
- o.reverse()
- ui.pager(b'outgoing')
- displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
- count = 0
- for n in o:
- if limit is not None and count >= limit:
- break
- parents = [p for p in repo.changelog.parents(n) if p != nullid]
- if opts.get(b'no_merges') and len(parents) == 2:
- continue
- count += 1
- displayer.show(repo[n])
- displayer.close()
- cmdutil.outgoinghooks(ui, repo, other, opts, o)
- recurse()
- return 0 # exit code is zero since we found outgoing changes
+ recurse()
+ return 0 # exit code is zero since we found outgoing changes
+ finally:
+ other.close()
def verify(repo, level=None):
diff --git a/mercurial/debugcommands.py b/mercurial/debugcommands.py
--- a/mercurial/debugcommands.py
+++ b/mercurial/debugcommands.py
@@ -471,17 +471,20 @@
"""lists the capabilities of a remote peer"""
opts = pycompat.byteskwargs(opts)
peer = hg.peer(ui, opts, path)
- caps = peer.capabilities()
- ui.writenoi18n(b'Main capabilities:\n')
- for c in sorted(caps):
- ui.write(b' %s\n' % c)
- b2caps = bundle2.bundle2caps(peer)
- if b2caps:
- ui.writenoi18n(b'Bundle2 capabilities:\n')
- for key, values in sorted(pycompat.iteritems(b2caps)):
- ui.write(b' %s\n' % key)
- for v in values:
- ui.write(b' %s\n' % v)
+ try:
+ caps = peer.capabilities()
+ ui.writenoi18n(b'Main capabilities:\n')
+ for c in sorted(caps):
+ ui.write(b' %s\n' % c)
+ b2caps = bundle2.bundle2caps(peer)
+ if b2caps:
+ ui.writenoi18n(b'Bundle2 capabilities:\n')
+ for key, values in sorted(pycompat.iteritems(b2caps)):
+ ui.write(b' %s\n' % key)
+ for v in values:
+ ui.write(b' %s\n' % v)
+ finally:
+ peer.close()
@command(
@@ -2615,12 +2618,17 @@
with ui.configoverride(overrides):
peer = hg.peer(ui, {}, path)
- local = peer.local() is not None
- canpush = peer.canpush()
-
- ui.write(_(b'url: %s\n') % peer.url())
- ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
- ui.write(_(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no')))
+ try:
+ local = peer.local() is not None
+ canpush = peer.canpush()
+
+ ui.write(_(b'url: %s\n') % peer.url())
+ ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
+ ui.write(
+ _(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no'))
+ )
+ finally:
+ peer.close()
@command(
@@ -2723,26 +2731,30 @@
"""
target = hg.peer(ui, {}, repopath)
- if keyinfo:
- key, old, new = keyinfo
- with target.commandexecutor() as e:
- r = e.callcommand(
- b'pushkey',
- {
- b'namespace': namespace,
- b'key': key,
- b'old': old,
- b'new': new,
- },
- ).result()
-
- ui.status(pycompat.bytestr(r) + b'\n')
- return not r
- else:
- for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
- ui.write(
- b"%s\t%s\n" % (stringutil.escapestr(k), stringutil.escapestr(v))
- )
+ try:
+ if keyinfo:
+ key, old, new = keyinfo
+ with target.commandexecutor() as e:
+ r = e.callcommand(
+ b'pushkey',
+ {
+ b'namespace': namespace,
+ b'key': key,
+ b'old': old,
+ b'new': new,
+ },
+ ).result()
+
+ ui.status(pycompat.bytestr(r) + b'\n')
+ return not r
+ else:
+ for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
+ ui.write(
+ b"%s\t%s\n"
+ % (stringutil.escapestr(k), stringutil.escapestr(v))
+ )
+ finally:
+ target.close()
@command(b'debugpvec', [], _(b'A B'))
@@ -4092,19 +4104,22 @@
def debugwireargs(ui, repopath, *vals, **opts):
opts = pycompat.byteskwargs(opts)
repo = hg.peer(ui, opts, repopath)
- for opt in cmdutil.remoteopts:
- del opts[opt[1]]
- args = {}
- for k, v in pycompat.iteritems(opts):
- if v:
- args[k] = v
- args = pycompat.strkwargs(args)
- # run twice to check that we don't mess up the stream for the next command
- res1 = repo.debugwireargs(*vals, **args)
- res2 = repo.debugwireargs(*vals, **args)
- ui.write(b"%s\n" % res1)
- if res1 != res2:
- ui.warn(b"%s\n" % res2)
+ try:
+ for opt in cmdutil.remoteopts:
+ del opts[opt[1]]
+ args = {}
+ for k, v in pycompat.iteritems(opts):
+ if v:
+ args[k] = v
+ args = pycompat.strkwargs(args)
+ # run twice to check that we don't mess up the stream for the next command
+ res1 = repo.debugwireargs(*vals, **args)
+ res2 = repo.debugwireargs(*vals, **args)
+ ui.write(b"%s\n" % res1)
+ if res1 != res2:
+ ui.warn(b"%s\n" % res2)
+ finally:
+ repo.close()
def _parsewirelangblocks(fh):
diff --git a/mercurial/commands.py b/mercurial/commands.py
--- a/mercurial/commands.py
+++ b/mercurial/commands.py
@@ -3820,132 +3820,138 @@
output = []
revs = []
- if source:
- source, branches = hg.parseurl(ui.expandpath(source))
- peer = hg.peer(repo or ui, opts, source) # only pass ui when no repo
- repo = peer.local()
- revs, checkout = hg.addbranchrevs(repo, peer, branches, None)
-
- fm = ui.formatter(b'identify', opts)
- fm.startitem()
-
- if not repo:
- if num or branch or tags:
- raise error.InputError(
- _(b"can't query remote revision number, branch, or tags")
- )
- if not rev and revs:
- rev = revs[0]
- if not rev:
- rev = b"tip"
-
- remoterev = peer.lookup(rev)
- hexrev = fm.hexfunc(remoterev)
- if default or id:
- output = [hexrev]
- fm.data(id=hexrev)
-
- @util.cachefunc
- def getbms():
- bms = []
-
- if b'bookmarks' in peer.listkeys(b'namespaces'):
- hexremoterev = hex(remoterev)
- bms = [
- bm
- for bm, bmr in pycompat.iteritems(
- peer.listkeys(b'bookmarks')
+ peer = None
+ try:
+ if source:
+ source, branches = hg.parseurl(ui.expandpath(source))
+ # only pass ui when no repo
+ peer = hg.peer(repo or ui, opts, source)
+ repo = peer.local()
+ revs, checkout = hg.addbranchrevs(repo, peer, branches, None)
+
+ fm = ui.formatter(b'identify', opts)
+ fm.startitem()
+
+ if not repo:
+ if num or branch or tags:
+ raise error.InputError(
+ _(b"can't query remote revision number, branch, or tags")
+ )
+ if not rev and revs:
+ rev = revs[0]
+ if not rev:
+ rev = b"tip"
+
+ remoterev = peer.lookup(rev)
+ hexrev = fm.hexfunc(remoterev)
+ if default or id:
+ output = [hexrev]
+ fm.data(id=hexrev)
+
+ @util.cachefunc
+ def getbms():
+ bms = []
+
+ if b'bookmarks' in peer.listkeys(b'namespaces'):
+ hexremoterev = hex(remoterev)
+ bms = [
+ bm
+ for bm, bmr in pycompat.iteritems(
+ peer.listkeys(b'bookmarks')
+ )
+ if bmr == hexremoterev
+ ]
+
+ return sorted(bms)
+
+ if fm.isplain():
+ if bookmarks:
+ output.extend(getbms())
+ elif default and not ui.quiet:
+ # multiple bookmarks for a single parent separated by '/'
+ bm = b'/'.join(getbms())
+ if bm:
+ output.append(bm)
+ else:
+ fm.data(node=hex(remoterev))
+ if bookmarks or b'bookmarks' in fm.datahint():
+ fm.data(bookmarks=fm.formatlist(getbms(), name=b'bookmark'))
+ else:
+ if rev:
+ repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
+ ctx = scmutil.revsingle(repo, rev, None)
+
+ if ctx.rev() is None:
+ ctx = repo[None]
+ parents = ctx.parents()
+ taglist = []
+ for p in parents:
+ taglist.extend(p.tags())
+
+ dirty = b""
+ if ctx.dirty(missing=True, merge=False, branch=False):
+ dirty = b'+'
+ fm.data(dirty=dirty)
+
+ hexoutput = [fm.hexfunc(p.node()) for p in parents]
+ if default or id:
+ output = [b"%s%s" % (b'+'.join(hexoutput), dirty)]
+ fm.data(id=b"%s%s" % (b'+'.join(hexoutput), dirty))
+
+ if num:
+ numoutput = [b"%d" % p.rev() for p in parents]
+ output.append(b"%s%s" % (b'+'.join(numoutput), dirty))
+
+ fm.data(
+ parents=fm.formatlist(
+ [fm.hexfunc(p.node()) for p in parents], name=b'node'
)
- if bmr == hexremoterev
- ]
-
- return sorted(bms)
-
- if fm.isplain():
- if bookmarks:
- output.extend(getbms())
- elif default and not ui.quiet:
+ )
+ else:
+ hexoutput = fm.hexfunc(ctx.node())
+ if default or id:
+ output = [hexoutput]
+ fm.data(id=hexoutput)
+
+ if num:
+ output.append(pycompat.bytestr(ctx.rev()))
+ taglist = ctx.tags()
+
+ if default and not ui.quiet:
+ b = ctx.branch()
+ if b != b'default':
+ output.append(b"(%s)" % b)
+
+ # multiple tags for a single parent separated by '/'
+ t = b'/'.join(taglist)
+ if t:
+ output.append(t)
+
# multiple bookmarks for a single parent separated by '/'
- bm = b'/'.join(getbms())
+ bm = b'/'.join(ctx.bookmarks())
if bm:
output.append(bm)
- else:
- fm.data(node=hex(remoterev))
- if bookmarks or b'bookmarks' in fm.datahint():
- fm.data(bookmarks=fm.formatlist(getbms(), name=b'bookmark'))
- else:
- if rev:
- repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
- ctx = scmutil.revsingle(repo, rev, None)
-
- if ctx.rev() is None:
- ctx = repo[None]
- parents = ctx.parents()
- taglist = []
- for p in parents:
- taglist.extend(p.tags())
-
- dirty = b""
- if ctx.dirty(missing=True, merge=False, branch=False):
- dirty = b'+'
- fm.data(dirty=dirty)
-
- hexoutput = [fm.hexfunc(p.node()) for p in parents]
- if default or id:
- output = [b"%s%s" % (b'+'.join(hexoutput), dirty)]
- fm.data(id=b"%s%s" % (b'+'.join(hexoutput), dirty))
-
- if num:
- numoutput = [b"%d" % p.rev() for p in parents]
- output.append(b"%s%s" % (b'+'.join(numoutput), dirty))
-
- fm.data(
- parents=fm.formatlist(
- [fm.hexfunc(p.node()) for p in parents], name=b'node'
- )
- )
- else:
- hexoutput = fm.hexfunc(ctx.node())
- if default or id:
- output = [hexoutput]
- fm.data(id=hexoutput)
-
- if num:
- output.append(pycompat.bytestr(ctx.rev()))
- taglist = ctx.tags()
-
- if default and not ui.quiet:
- b = ctx.branch()
- if b != b'default':
- output.append(b"(%s)" % b)
-
- # multiple tags for a single parent separated by '/'
- t = b'/'.join(taglist)
- if t:
- output.append(t)
-
- # multiple bookmarks for a single parent separated by '/'
- bm = b'/'.join(ctx.bookmarks())
- if bm:
- output.append(bm)
- else:
- if branch:
- output.append(ctx.branch())
-
- if tags:
- output.extend(taglist)
-
- if bookmarks:
- output.extend(ctx.bookmarks())
-
- fm.data(node=ctx.hex())
- fm.data(branch=ctx.branch())
- fm.data(tags=fm.formatlist(taglist, name=b'tag', sep=b':'))
- fm.data(bookmarks=fm.formatlist(ctx.bookmarks(), name=b'bookmark'))
- fm.context(ctx=ctx)
-
- fm.plain(b"%s\n" % b' '.join(output))
- fm.end()
+ else:
+ if branch:
+ output.append(ctx.branch())
+
+ if tags:
+ output.extend(taglist)
+
+ if bookmarks:
+ output.extend(ctx.bookmarks())
+
+ fm.data(node=ctx.hex())
+ fm.data(branch=ctx.branch())
+ fm.data(tags=fm.formatlist(taglist, name=b'tag', sep=b':'))
+ fm.data(bookmarks=fm.formatlist(ctx.bookmarks(), name=b'bookmark'))
+ fm.context(ctx=ctx)
+
+ fm.plain(b"%s\n" % b' '.join(output))
+ fm.end()
+ finally:
+ if peer:
+ peer.close()
@command(
@@ -4291,12 +4297,15 @@
ui.expandpath(source), opts.get(b'branch')
)
other = hg.peer(repo, opts, source)
- if b'bookmarks' not in other.listkeys(b'namespaces'):
- ui.warn(_(b"remote doesn't support bookmarks\n"))
- return 0
- ui.pager(b'incoming')
- ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
- return bookmarks.incoming(ui, repo, other)
+ try:
+ if b'bookmarks' not in other.listkeys(b'namespaces'):
+ ui.warn(_(b"remote doesn't support bookmarks\n"))
+ return 0
+ ui.pager(b'incoming')
+ ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
+ return bookmarks.incoming(ui, repo, other)
+ finally:
+ other.close()
repo._subtoppath = ui.expandpath(source)
try:
@@ -4327,7 +4336,8 @@
Returns 0 on success.
"""
opts = pycompat.byteskwargs(opts)
- hg.peer(ui, opts, ui.expandpath(dest), create=True)
+ peer = hg.peer(ui, opts, ui.expandpath(dest), create=True)
+ peer.close()
@command(
@@ -4963,12 +4973,15 @@
if opts.get(b'bookmarks'):
dest = path.pushloc or path.loc
other = hg.peer(repo, opts, dest)
- if b'bookmarks' not in other.listkeys(b'namespaces'):
- ui.warn(_(b"remote doesn't support bookmarks\n"))
- return 0
- ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
- ui.pager(b'outgoing')
- return bookmarks.outgoing(ui, repo, other)
+ try:
+ if b'bookmarks' not in other.listkeys(b'namespaces'):
+ ui.warn(_(b"remote doesn't support bookmarks\n"))
+ return 0
+ ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
+ ui.pager(b'outgoing')
+ return bookmarks.outgoing(ui, repo, other)
+ finally:
+ other.close()
repo._subtoppath = path.pushloc or path.loc
try:
@@ -5679,63 +5692,67 @@
revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
other = hg.peer(repo, opts, dest)
- if revs:
- revs = [repo[r].node() for r in scmutil.revrange(repo, revs)]
- if not revs:
+ try:
+ if revs:
+ revs = [repo[r].node() for r in scmutil.revrange(repo, revs)]
+ if not revs:
+ raise error.InputError(
+ _(b"specified revisions evaluate to an empty set"),
+ hint=_(b"use different revision arguments"),
+ )
+ elif path.pushrev:
+ # It doesn't make any sense to specify ancestor revisions. So limit
+ # to DAG heads to make discovery simpler.
+ expr = revsetlang.formatspec(b'heads(%r)', path.pushrev)
+ revs = scmutil.revrange(repo, [expr])
+ revs = [repo[rev].node() for rev in revs]
+ if not revs:
+ raise error.InputError(
+ _(b'default push revset for path evaluates to an empty set')
+ )
+ elif ui.configbool(b'commands', b'push.require-revs'):
raise error.InputError(
- _(b"specified revisions evaluate to an empty set"),
- hint=_(b"use different revision arguments"),
+ _(b'no revisions specified to push'),
+ hint=_(b'did you mean "hg push -r ."?'),
)
- elif path.pushrev:
- # It doesn't make any sense to specify ancestor revisions. So limit
- # to DAG heads to make discovery simpler.
- expr = revsetlang.formatspec(b'heads(%r)', path.pushrev)
- revs = scmutil.revrange(repo, [expr])
- revs = [repo[rev].node() for rev in revs]
- if not revs:
- raise error.InputError(
- _(b'default push revset for path evaluates to an empty set')
- )
- elif ui.configbool(b'commands', b'push.require-revs'):
- raise error.InputError(
- _(b'no revisions specified to push'),
- hint=_(b'did you mean "hg push -r ."?'),
+
+ repo._subtoppath = dest
+ try:
+ # push subrepos depth-first for coherent ordering
+ c = repo[b'.']
+ subs = c.substate # only repos that are committed
+ for s in sorted(subs):
+ result = c.sub(s).push(opts)
+ if result == 0:
+ return not result
+ finally:
+ del repo._subtoppath
+
+ opargs = dict(
+ opts.get(b'opargs', {})
+ ) # copy opargs since we may mutate it
+ opargs.setdefault(b'pushvars', []).extend(opts.get(b'pushvars', []))
+
+ pushop = exchange.push(
+ repo,
+ other,
+ opts.get(b'force'),
+ revs=revs,
+ newbranch=opts.get(b'new_branch'),
+ bookmarks=opts.get(b'bookmark', ()),
+ publish=opts.get(b'publish'),
+ opargs=opargs,
)
- repo._subtoppath = dest
- try:
- # push subrepos depth-first for coherent ordering
- c = repo[b'.']
- subs = c.substate # only repos that are committed
- for s in sorted(subs):
- result = c.sub(s).push(opts)
- if result == 0:
- return not result
+ result = not pushop.cgresult
+
+ if pushop.bkresult is not None:
+ if pushop.bkresult == 2:
+ result = 2
+ elif not result and pushop.bkresult:
+ result = 2
finally:
- del repo._subtoppath
-
- opargs = dict(opts.get(b'opargs', {})) # copy opargs since we may mutate it
- opargs.setdefault(b'pushvars', []).extend(opts.get(b'pushvars', []))
-
- pushop = exchange.push(
- repo,
- other,
- opts.get(b'force'),
- revs=revs,
- newbranch=opts.get(b'new_branch'),
- bookmarks=opts.get(b'bookmark', ()),
- publish=opts.get(b'publish'),
- opargs=opargs,
- )
-
- result = not pushop.cgresult
-
- if pushop.bkresult is not None:
- if pushop.bkresult == 2:
- result = 2
- elif not result and pushop.bkresult:
- result = 2
-
+ other.close()
return result
diff --git a/hgext/narrow/narrowcommands.py b/hgext/narrow/narrowcommands.py
--- a/hgext/narrow/narrowcommands.py
+++ b/hgext/narrow/narrowcommands.py
@@ -595,77 +595,83 @@
ui.status(_(b'comparing with %s\n') % util.hidepassword(url))
remote = hg.peer(repo, opts, url)
- # check narrow support before doing anything if widening needs to be
- # performed. In future we should also abort if client is ellipses and
- # server does not support ellipses
- if widening and wireprototypes.NARROWCAP not in remote.capabilities():
- raise error.Abort(_(b"server does not support narrow clones"))
-
- commoninc = discovery.findcommonincoming(repo, remote)
-
- if autoremoveincludes:
- outgoing = discovery.findcommonoutgoing(
- repo, remote, commoninc=commoninc
- )
- ui.status(_(b'looking for unused includes to remove\n'))
- localfiles = set()
- for n in itertools.chain(outgoing.missing, outgoing.excluded):
- localfiles.update(repo[n].files())
- suggestedremovals = []
- for include in sorted(oldincludes):
- match = narrowspec.match(repo.root, [include], oldexcludes)
- if not any(match(f) for f in localfiles):
- suggestedremovals.append(include)
- if suggestedremovals:
- for s in suggestedremovals:
- ui.status(b'%s\n' % s)
- if (
- ui.promptchoice(
- _(
- b'remove these unused includes (yn)?'
- b'$$ &Yes $$ &No'
+ try:
+ # check narrow support before doing anything if widening needs to be
+ # performed. In future we should also abort if client is ellipses and
+ # server does not support ellipses
+ if (
+ widening
+ and wireprototypes.NARROWCAP not in remote.capabilities()
+ ):
+ raise error.Abort(_(b"server does not support narrow clones"))
+
+ commoninc = discovery.findcommonincoming(repo, remote)
+
+ if autoremoveincludes:
+ outgoing = discovery.findcommonoutgoing(
+ repo, remote, commoninc=commoninc
+ )
+ ui.status(_(b'looking for unused includes to remove\n'))
+ localfiles = set()
+ for n in itertools.chain(outgoing.missing, outgoing.excluded):
+ localfiles.update(repo[n].files())
+ suggestedremovals = []
+ for include in sorted(oldincludes):
+ match = narrowspec.match(repo.root, [include], oldexcludes)
+ if not any(match(f) for f in localfiles):
+ suggestedremovals.append(include)
+ if suggestedremovals:
+ for s in suggestedremovals:
+ ui.status(b'%s\n' % s)
+ if (
+ ui.promptchoice(
+ _(
+ b'remove these unused includes (yn)?'
+ b'$$ &Yes $$ &No'
+ )
)
- )
- == 0
- ):
- removedincludes.update(suggestedremovals)
- narrowing = True
- else:
- ui.status(_(b'found no unused includes\n'))
-
- if narrowing:
- newincludes = oldincludes - removedincludes
- newexcludes = oldexcludes | addedexcludes
- _narrow(
- ui,
- repo,
- remote,
- commoninc,
- oldincludes,
- oldexcludes,
- newincludes,
- newexcludes,
- opts[b'force_delete_local_changes'],
- opts[b'backup'],
- )
- # _narrow() updated the narrowspec and _widen() below needs to
- # use the updated values as its base (otherwise removed includes
- # and addedexcludes will be lost in the resulting narrowspec)
- oldincludes = newincludes
- oldexcludes = newexcludes
-
- if widening:
- newincludes = oldincludes | addedincludes
- newexcludes = oldexcludes - removedexcludes
- _widen(
- ui,
- repo,
- remote,
- commoninc,
- oldincludes,
- oldexcludes,
- newincludes,
- newexcludes,
- )
+ == 0
+ ):
+ removedincludes.update(suggestedremovals)
+ narrowing = True
+ else:
+ ui.status(_(b'found no unused includes\n'))
+
+ if narrowing:
+ newincludes = oldincludes - removedincludes
+ newexcludes = oldexcludes | addedexcludes
+ _narrow(
+ ui,
+ repo,
+ remote,
+ commoninc,
+ oldincludes,
+ oldexcludes,
+ newincludes,
+ newexcludes,
+ opts[b'force_delete_local_changes'],
+ opts[b'backup'],
+ )
+ # _narrow() updated the narrowspec and _widen() below needs to
+ # use the updated values as its base (otherwise removed includes
+ # and addedexcludes will be lost in the resulting narrowspec)
+ oldincludes = newincludes
+ oldexcludes = newexcludes
+
+ if widening:
+ newincludes = oldincludes | addedincludes
+ newexcludes = oldexcludes - removedexcludes
+ _widen(
+ ui,
+ repo,
+ remote,
+ commoninc,
+ oldincludes,
+ oldexcludes,
+ newincludes,
+ newexcludes,
+ )
+ finally:
+ remote.close()
return 0
diff --git a/hgext/infinitepush/__init__.py b/hgext/infinitepush/__init__.py
--- a/hgext/infinitepush/__init__.py
+++ b/hgext/infinitepush/__init__.py
@@ -704,16 +704,19 @@
if scratchbookmarks:
other = hg.peer(repo, opts, source)
- fetchedbookmarks = other.listkeyspatterns(
- b'bookmarks', patterns=scratchbookmarks
- )
- for bookmark in scratchbookmarks:
- if bookmark not in fetchedbookmarks:
- raise error.Abort(
- b'remote bookmark %s not found!' % bookmark
- )
- scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
- revs.append(fetchedbookmarks[bookmark])
+ try:
+ fetchedbookmarks = other.listkeyspatterns(
+ b'bookmarks', patterns=scratchbookmarks
+ )
+ for bookmark in scratchbookmarks:
+ if bookmark not in fetchedbookmarks:
+ raise error.Abort(
+ b'remote bookmark %s not found!' % bookmark
+ )
+ scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
+ revs.append(fetchedbookmarks[bookmark])
+ finally:
+ other.close()
opts[b'bookmark'] = bookmarks
opts[b'rev'] = revs
@@ -848,10 +851,13 @@
if common.isremotebooksenabled(ui):
if bookmark and scratchpush:
other = hg.peer(repo, opts, destpath)
- fetchedbookmarks = other.listkeyspatterns(
- b'bookmarks', patterns=[bookmark]
- )
- remotescratchbookmarks.update(fetchedbookmarks)
+ try:
+ fetchedbookmarks = other.listkeyspatterns(
+ b'bookmarks', patterns=[bookmark]
+ )
+ remotescratchbookmarks.update(fetchedbookmarks)
+ finally:
+ other.close()
_saveremotebookmarks(repo, remotescratchbookmarks, destpath)
if oldphasemove:
exchange._localphasemove = oldphasemove
To: valentin.gatienbaron, durin42, martinvonz, #hg-reviewers
Cc: mercurial-patches, mercurial-devel