mirror of
https://github.com/samsonjs/vdirsyncer.git
synced 2026-03-25 08:55:50 +00:00
Bidirectional sync
This commit is contained in:
parent
a007828f87
commit
b44db992e7
6 changed files with 155 additions and 19 deletions
|
|
@ -1,5 +1,8 @@
|
|||
# An example configuration for vdirsyncer.
|
||||
|
||||
#
|
||||
# Move it to ~/.vdirsyncer/config or ~/.config/vdirsyncer/config and edit it.
|
||||
# Run `vdirsyncer --help` for CLI usage.
|
||||
#
|
||||
# Optional parameters are commented out.
|
||||
# This file doesn't document all available parameters, see
|
||||
# http://vdirsyncer.readthedocs.org/ for the rest of them.
|
||||
|
|
@ -27,6 +30,9 @@ b = bob_contacts_remote
|
|||
|
||||
collections = ["from b"]
|
||||
|
||||
# Synchronize the "display name" property into a local file (~/.contacts/displayname).
|
||||
metadata = ["displayname"]
|
||||
|
||||
# To resolve a conflict the following values are possible:
|
||||
# `null` - abort when collisions occur (default)
|
||||
# `"a wins"` - assume a's items are more up-to-date
|
||||
|
|
@ -54,6 +60,9 @@ a = bob_calendar_local
|
|||
b = bob_calendar_remote
|
||||
collections = ["private", "work"]
|
||||
|
||||
# Calendars also have a color property
|
||||
metadata = ["displayname", "color"]
|
||||
|
||||
[storage bob_calendar_local]
|
||||
type = filesystem
|
||||
path = ~/.calendars/
|
||||
|
|
|
|||
|
|
@ -62,10 +62,11 @@ Pair Section
|
|||
|
||||
- ``a`` and ``b`` reference the storages to sync by their names.
|
||||
|
||||
- ``collections``: Optional, a list of collections to synchronize. If this
|
||||
parameter is omitted, it is assumed the storages are already directly
|
||||
pointing to one collection each. Specifying a collection multiple times won't
|
||||
make vdirsyncer sync that collection more than once.
|
||||
- ``collections``: Optional, a list of collections to synchronize when
|
||||
``vdirsyncer sync`` is executed. If this parameter is omitted, it is assumed
|
||||
the storages are already directly pointing to one collection each. Specifying
|
||||
a collection multiple times won't make vdirsyncer sync that collection more
|
||||
than once.
|
||||
|
||||
Furthermore, there are the special values ``"from a"`` and ``"from b"``,
|
||||
which tell vdirsyncer to try autodiscovery on a specific storage.
|
||||
|
|
@ -88,6 +89,14 @@ Pair Section
|
|||
Vdirsyncer will not attempt to merge the two items.
|
||||
- ``null``, the default, where an error is shown and no changes are done.
|
||||
|
||||
- ``metadata``: Metadata keys that should be synchronized when ``vdirsyncer
|
||||
metasync`` is executed. Example::
|
||||
|
||||
metadata = ["color", "displayname"]
|
||||
|
||||
This synchronizes the ``color`` and the ``displayname`` properties. The
|
||||
``conflict_resolution`` parameter applies here as well.
|
||||
|
||||
.. _storage_config:
|
||||
|
||||
Storage Section
|
||||
|
|
|
|||
|
|
@ -105,14 +105,19 @@ def sync(pairs, force_delete, max_workers):
|
|||
Synchronize the given pairs. If no arguments are given, all will be
|
||||
synchronized.
|
||||
|
||||
`vdirsyncer sync` will sync everything configured.
|
||||
This command will not synchronize metadata, use `vdirsyncer metasync` for
|
||||
that.
|
||||
|
||||
`vdirsyncer sync bob frank` will sync the pairs "bob" and "frank".
|
||||
Examples:
|
||||
|
||||
`vdirsyncer sync bob/first_collection` will sync "first_collection" from
|
||||
the pair "bob".
|
||||
`vdirsyncer sync` will sync everything configured.
|
||||
|
||||
`vdirsyncer sync bob frank` will sync the pairs "bob" and "frank".
|
||||
|
||||
`vdirsyncer sync bob/first_collection` will sync "first_collection"
|
||||
from the pair "bob".
|
||||
'''
|
||||
from .tasks import sync_pair
|
||||
from .tasks import prepare_pair, sync_collection
|
||||
from .utils import parse_pairs_args, WorkerQueue
|
||||
general, all_pairs, all_storages = ctx.obj['config']
|
||||
|
||||
|
|
@ -120,11 +125,39 @@ def sync(pairs, force_delete, max_workers):
|
|||
|
||||
for pair_name, collections in parse_pairs_args(pairs, all_pairs):
|
||||
wq.spawn_worker()
|
||||
wq.put(functools.partial(sync_pair, pair_name=pair_name,
|
||||
collections_to_sync=collections,
|
||||
wq.put(functools.partial(prepare_pair, pair_name=pair_name,
|
||||
collections=collections,
|
||||
general=general, all_pairs=all_pairs,
|
||||
all_storages=all_storages,
|
||||
force_delete=force_delete))
|
||||
force_delete=force_delete,
|
||||
callback=sync_collection))
|
||||
|
||||
wq.join()
|
||||
|
||||
|
||||
@app.command()
@click.argument('pairs', nargs=-1)
@max_workers_option
@catch_errors
def metasync(pairs, max_workers):
    '''
    Synchronize metadata of the given pairs.

    See the `sync` command regarding the PAIRS argument.
    '''
    # NOTE(review): `ctx` is not a parameter of this function -- presumably
    # it is injected by the `@app.command()` machinery; confirm against the
    # rest of cli.py.
    from .tasks import prepare_pair, metasync_collection
    from .utils import parse_pairs_args, WorkerQueue

    general, all_pairs, all_storages = ctx.obj['config']
    wq = WorkerQueue(max_workers)

    # One worker per requested pair; prepare_pair fans out per-collection
    # jobs and invokes metasync_collection as the callback for each.
    for pair_name, collections in parse_pairs_args(pairs, all_pairs):
        wq.spawn_worker()
        task = functools.partial(
            prepare_pair,
            pair_name=pair_name,
            collections=collections,
            general=general,
            all_pairs=all_pairs,
            all_storages=all_storages,
            callback=metasync_collection,
        )
        wq.put(task)

    wq.join()
|
||||
|
||||
|
|
|
|||
|
|
@ -10,8 +10,8 @@ from .utils import CliError, JobFailed, cli_logger, collections_for_pair, \
|
|||
from ..sync import sync
|
||||
|
||||
|
||||
def sync_pair(wq, pair_name, collections_to_sync, general, all_pairs,
|
||||
all_storages, force_delete):
|
||||
def prepare_pair(wq, pair_name, collections, general, all_pairs, all_storages,
|
||||
callback, **kwargs):
|
||||
a_name, b_name, pair_options = all_pairs[pair_name]
|
||||
|
||||
try:
|
||||
|
|
@ -28,7 +28,7 @@ def sync_pair(wq, pair_name, collections_to_sync, general, all_pairs,
|
|||
|
||||
# spawn one worker less because we can reuse the current one
|
||||
new_workers = -1
|
||||
for collection in (collections_to_sync or all_collections):
|
||||
for collection in (collections or all_collections):
|
||||
try:
|
||||
config_a, config_b = all_collections[collection]
|
||||
except KeyError:
|
||||
|
|
@ -37,9 +37,9 @@ def sync_pair(wq, pair_name, collections_to_sync, general, all_pairs,
|
|||
pair_name, collection, list(all_collections)))
|
||||
new_workers += 1
|
||||
wq.put(functools.partial(
|
||||
sync_collection, pair_name=pair_name, collection=collection,
|
||||
callback, pair_name=pair_name, collection=collection,
|
||||
config_a=config_a, config_b=config_b, pair_options=pair_options,
|
||||
general=general, force_delete=force_delete
|
||||
general=general, **kwargs
|
||||
))
|
||||
|
||||
for i in range(new_workers):
|
||||
|
|
@ -107,3 +107,30 @@ def repair_collection(general, all_pairs, all_storages, collection):
|
|||
cli_logger.info('Repairing {}/{}'.format(storage_name, collection))
|
||||
cli_logger.warning('Make sure no other program is talking to the server.')
|
||||
repair_storage(storage)
|
||||
|
||||
|
||||
def metasync_collection(wq, pair_name, collection, config_a, config_b,
                        pair_options, general):
    '''Metasync a single collection of a pair and persist the new status.

    Runs as a worker job (see WorkerQueue); `wq` is unused here but kept
    for signature parity with the other collection callbacks -- TODO
    confirm against sync_collection.

    :raises JobFailed: after reporting any error via handle_cli_error,
        so the worker can account the failure.
    '''
    from ..metasync import metasync
    status_name = get_status_name(pair_name, collection)

    try:
        cli_logger.info('Metasyncing {}'.format(status_name))

        # Missing status file -> start from an empty mapping.
        status = load_status(general['status_path'], pair_name,
                             collection, data_type='metadata') or {}

        a = storage_instance_from_config(config_a)
        b = storage_instance_from_config(config_b)

        metasync(
            a, b, status,
            conflict_resolution=pair_options.get('conflict_resolution', None),
            keys=pair_options.get('metadata', None) or ()
        )
    except Exception:
        # `except Exception` instead of a bare `except:` so that
        # KeyboardInterrupt/SystemExit still abort the whole run rather
        # than being reported as a failed job.
        handle_cli_error(status_name)
        raise JobFailed()

    # Only persist the status if the metasync completed without error.
    save_status(general['status_path'], pair_name, collection,
                data_type='metadata', data=status)
|
||||
|
|
|
|||
54
vdirsyncer/metasync.py
Normal file
54
vdirsyncer/metasync.py
Normal file
|
|
@ -0,0 +1,54 @@
|
|||
from . import exceptions, log
|
||||
|
||||
logger = log.get(__name__)
|
||||
|
||||
|
||||
class MetaSyncError(exceptions.Error):
    """Base class for all errors raised during metadata synchronization."""
|
||||
|
||||
|
||||
class MetaSyncConflict(MetaSyncError):
    """Raised when a metadata key has diverged on both storages and no
    conflict resolution policy is configured.
    """

    # The metadata key the conflict occurred on (passed as a keyword
    # argument at raise time -- presumably stored by exceptions.Error's
    # constructor; confirm).
    key = None
|
||||
|
||||
|
||||
def metasync(storage_a, storage_b, status, keys, conflict_resolution):
    '''Synchronize metadata keys between two storages.

    For each key in ``keys`` the current values of both storages are
    compared against the last-seen value recorded in ``status`` to decide
    which side changed and therefore in which direction to copy.

    :param storage_a: First storage; must offer ``get_meta``/``set_meta``.
    :param storage_b: Second storage, same interface as ``storage_a``.
    :param status: Mutable mapping of key -> last synchronized value.
        Updated in place; entries for keys not in ``keys`` are removed.
    :param keys: Iterable of metadata keys to synchronize.
    :param conflict_resolution: ``None`` (raise on conflict), ``'a wins'``
        or ``'b wins'``.
    :raises MetaSyncConflict: if both sides changed to different values
        and ``conflict_resolution`` is ``None``.
    :raises MetaSyncError: if ``conflict_resolution`` has an unknown value.
    '''
    def _a_to_b():
        logger.info(u'Copying {} to {}'.format(key, storage_b))
        storage_b.set_meta(key, a)
        status[key] = a

    def _b_to_a():
        logger.info(u'Copying {} to {}'.format(key, storage_a))
        storage_a.set_meta(key, b)
        status[key] = b

    def _resolve_conflict():
        if a == b:
            # Both sides changed to the same value.  Record it in the
            # status -- previously this was a `pass`, which left the old
            # value in `status` and made the key show up as a conflict
            # again on every subsequent run.
            status[key] = a
        elif conflict_resolution is None:
            raise MetaSyncConflict(key=key)
        elif conflict_resolution == 'a wins':
            _a_to_b()
        elif conflict_resolution == 'b wins':
            _b_to_a()
        else:
            # Reject unknown settings loudly instead of silently skipping
            # the key, which would leave the conflict unresolved with no
            # diagnostic.
            raise MetaSyncError('Invalid conflict resolution setting: {!r}'
                                .format(conflict_resolution))

    for key in keys:
        a = storage_a.get_meta(key)
        b = storage_b.get_meta(key)
        s = status.get(key)
        logger.debug(u'Key: {}'.format(key))
        logger.debug(u'A: {}'.format(a))
        logger.debug(u'B: {}'.format(b))
        logger.debug(u'S: {}'.format(s))

        if a != s and b != s:
            # Both sides differ from the last-seen value: conflict.
            _resolve_conflict()
        elif a != s and b == s:
            # Only a changed since the last sync.
            _a_to_b()
        elif a == s and b != s:
            # Only b changed since the last sync.
            _b_to_a()
        else:
            # Neither side changed; the invariant a == b must hold.
            assert a == b

    # Drop status entries for keys that are no longer synchronized.
    for key in set(status) - set(keys):
        del status[key]
|
||||
|
|
@ -607,7 +607,11 @@ class DavStorage(Storage):
|
|||
data=data, headers=self.session.get_default_headers()
|
||||
)
|
||||
|
||||
# FIXME: Deal with response
|
||||
# XXX: Response content is currently ignored. Though exceptions are
|
||||
# raised for HTTP errors, a multistatus with errorcodes inside is not
|
||||
# parsed yet. Not sure how common those are, or how they look like. It
|
||||
# might be easier (and safer in case of a stupid server) to just issue
|
||||
# a PROPFIND to see if the value got actually set.
|
||||
|
||||
|
||||
class CaldavStorage(DavStorage):
|
||||
|
|
|
|||
Loading…
Reference in a new issue