Source code distributed/cli/dask_ssh.py

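"""Command line interface for launching a Dask cluster over SSH.

The invocations below are illustrative sketches; the host addresses and the
``hosts.txt`` file name are placeholders, not defaults::

    # Run the scheduler on 192.168.0.1 and workers on every listed host.
    dask-ssh 192.168.0.1 192.168.0.2 192.168.0.3

    # Read hosts from a file, with two worker processes per host.
    dask-ssh --hostfile hosts.txt --nprocs 2
"""
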
import sys

import click

from distributed.cli.utils import check_python_3
from distributed.deploy.old_ssh import SSHCluster


@click.command(
    help="""Launch a distributed cluster over SSH. A 'dask-scheduler' process will run on the
                         first host specified in [HOSTNAMES] or in the hostfile (unless --scheduler is specified
                         explicitly). One or more 'dask-worker' processes will be run each host in [HOSTNAMES] or
                         in the hostfile. Use command line flags to adjust how many dask-worker process are run on
                         each host (--nprocs) and how many cpus are used by each dask-worker process (--nthreads)."""
)
@click.option(
    "--scheduler",
    default=None,
    type=str,
    help="Specify scheduler node.  Defaults to first address.",
)
@click.option(
    "--scheduler-port",
    default=8786,
    show_default=True,
    type=int,
    help="Specify scheduler port number.",
)
@click.option(
    "--nthreads",
    default=0,
    type=int,
    help=(
        "Number of threads per worker process. "
        "Defaults to number of cores divided by the number of "
        "processes per host."
    ),
)
@click.option(
    "--nprocs",
    default=1,
    show_default=True,
    type=int,
    help="Number of worker processes per host.",
)
@click.argument("hostnames", nargs=-1, type=str)
@click.option(
    "--hostfile",
    default=None,
    type=click.Path(exists=True),
    help="Textfile with hostnames/IP addresses",
)
@click.option(
    "--ssh-username",
    default=None,
    type=str,
    help="Username to use when establishing SSH connections.",
)
@click.option(
    "--ssh-port",
    default=22,
    type=int,
    show_default=True,
    help="Port to use for SSH connections.",
)
@click.option(
    "--ssh-private-key",
    default=None,
    type=str,
    help="Private key file to use for SSH connections.",
)
@click.option("--nohost", is_flag=True, help="Do not pass the hostname to the worker.")
@click.option(
    "--log-directory",
    default=None,
    type=click.Path(exists=True),
    help=(
        "Directory to use on all cluster nodes for the output of "
        "dask-scheduler and dask-worker commands."
    ),
)
@click.option(
    "--local-directory",
    default=None,
    type=click.Path(exists=True),
    help=("Directory to use on all cluster nodes to place workers files."),
)
@click.option(
    "--remote-python", default=None, type=str, help="Path to Python on remote nodes."
)
@click.option(
    "--memory-limit",
    default="auto",
    show_default=True,
    help="Bytes of memory that the worker can use. "
    "This can be an integer (bytes), "
    "float (fraction of total system memory), "
    "string (like 5GB or 5000M), "
    "'auto', or zero for no memory management",
)
@click.option(
    "--worker-port",
    type=int,
    default=0,
    help="Serving computation port, defaults to random",
)
@click.option(
    "--nanny-port", type=int, default=0, help="Serving nanny port, defaults to random"
)
@click.option(
    "--remote-dask-worker",
    default="distributed.cli.dask_worker",
    show_default=True,
    type=str,
    help="Worker to run.",
)
@click.pass_context
@click.version_option()
def main(
    ctx,
    scheduler,
    scheduler_port,
    hostnames,
    hostfile,
    nthreads,
    nprocs,
    ssh_username,
    ssh_port,
    ssh_private_key,
    nohost,
    log_directory,
    remote_python,
    memory_limit,
    worker_port,
    nanny_port,
    remote_dask_worker,
    local_directory,
):
    try:
        hostnames = list(hostnames)
        if hostfile:
            with open(hostfile) as f:
                hosts = f.read().split()
            hostnames.extend(hosts)

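        # Default the scheduler to the first host; if no hosts were given at
        # all, hostnames[0] raises the IndexError handled below.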
        if not scheduler:
            scheduler = hostnames[0]

    except IndexError:
        print(ctx.get_help())
        sys.exit(1)

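    # Arguments are passed positionally, in the order expected by
    # distributed.deploy.old_ssh.SSHCluster.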
    c = SSHCluster(
        scheduler,
        scheduler_port,
        hostnames,
        nthreads,
        nprocs,
        ssh_username,
        ssh_port,
        ssh_private_key,
        nohost,
        log_directory,
        remote_python,
        memory_limit,
        worker_port,
        nanny_port,
        remote_dask_worker,
        local_directory,
    )

    import distributed

    print("\n---------------------------------------------------------------")
    print(
        "                 Dask.distributed v{version}\n".format(
            version=distributed.__version__
        )
    )
    print("Worker nodes: {n}".format(n=len(hostnames)))
    for i, host in enumerate(hostnames):
        print("  {num}: {host}".format(num=i, host=host))
    print("\nscheduler node: {addr}:{port}".format(addr=scheduler, port=scheduler_port))
    print("---------------------------------------------------------------\n\n")

    # Monitor the output of remote processes.  This blocks until the user issues a KeyboardInterrupt.
    c.monitor_remote_processes()

    # Close down the remote processes and exit.
    print("\n[ dask-ssh ]: Shutting down remote processes (this may take a moment).")
    c.shutdown()
    print("[ dask-ssh ]: Remote processes have been terminated. Exiting.")


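# Console-script entry point: runs a Python 3 compatibility check, then ``main``.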
def go():
    check_python_3()
    main()


if __name__ == "__main__":
    go()