diff --git "a/dataset-gathering.ipynb" "b/dataset-gathering.ipynb" --- "a/dataset-gathering.ipynb" +++ "b/dataset-gathering.ipynb" @@ -24,73 +24,37 @@ }, { "cell_type": "code", - "execution_count": 119, - "id": "5964d35c-a176-4dba-9449-5fbfa1766164", + "execution_count": 120, + "id": "ebe058f9-f62d-4978-890a-3e8200999779", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "\u001b[0;31mInit signature:\u001b[0m\n", - "\u001b[0mGithub\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\u001b[0m\n", - "\u001b[0;34m\u001b[0m \u001b[0mlogin_or_token\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", - "\u001b[0;34m\u001b[0m \u001b[0mpassword\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", - "\u001b[0;34m\u001b[0m \u001b[0mjwt\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", - "\u001b[0;34m\u001b[0m \u001b[0mbase_url\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'https://api.github.com'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", - "\u001b[0;34m\u001b[0m \u001b[0mtimeout\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m15\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", - "\u001b[0;34m\u001b[0m \u001b[0muser_agent\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'PyGithub/Python'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", - "\u001b[0;34m\u001b[0m \u001b[0mper_page\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m30\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", - "\u001b[0;34m\u001b[0m \u001b[0mverify\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", - "\u001b[0;34m\u001b[0m \u001b[0mretry\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", - "\u001b[0;34m\u001b[0m \u001b[0mpool_size\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", - "\u001b[0;34m\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mDocstring:\u001b[0m This is the main class you instantiate to access the Github API v3. Optional parameters allow different authentication methods.\n", - "\u001b[0;31mInit docstring:\u001b[0m\n", - ":param login_or_token: string\n", - ":param password: string\n", - ":param base_url: string\n", - ":param timeout: integer\n", - ":param user_agent: string\n", - ":param per_page: int\n", - ":param verify: boolean or string\n", - ":param retry: int or urllib3.util.retry.Retry object\n", - ":param pool_size: int\n", - "\u001b[0;31mFile:\u001b[0m ~/miniconda3/envs/accelerate/lib/python3.9/site-packages/github/MainClass.py\n", - "\u001b[0;31mType:\u001b[0m type\n", - "\u001b[0;31mSubclasses:\u001b[0m \n" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ - "Github?" 
+ "tok = \"MYGITHUBTOKEN\"" ] }, { "cell_type": "code", - "execution_count": 120, - "id": "ebe058f9-f62d-4978-890a-3e8200999779", + "execution_count": 157, + "id": "19d37444-e62e-4e31-bce8-6cd1ee63e133", "metadata": {}, "outputs": [], "source": [ - "tok = \"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\"" + "from tqdm.notebook import tqdm" ] }, { "cell_type": "code", - "execution_count": 121, + "execution_count": 155, "id": "fd25eafa-fb29-4051-ae0a-f71a0dbc92db", "metadata": {}, "outputs": [], "source": [ - "gh = Github(\"muellerzr\", tok)" + "gh = Github(\"MYGITNAME\", tok)" ] }, { "cell_type": "code", - "execution_count": 146, + "execution_count": 160, "id": "f3b9c5f6-31a0-49b2-834b-982952c6b7e5", "metadata": {}, "outputs": [], @@ -100,35 +64,48 @@ }, { "cell_type": "code", - "execution_count": 148, + "execution_count": 162, "id": "c60588b7-56de-457f-93a6-3e8b3b8965c8", "metadata": {}, "outputs": [ { - "ename": "KeyboardInterrupt", - "evalue": "", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", - "Input \u001b[0;32mIn [148]\u001b[0m, in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m pull \u001b[38;5;129;01min\u001b[39;00m page:\n\u001b[1;32m 7\u001b[0m pull_id \u001b[38;5;241m=\u001b[39m pull\u001b[38;5;241m.\u001b[39mid\n\u001b[0;32m----> 8\u001b[0m pull_diff \u001b[38;5;241m=\u001b[39m \u001b[43mget_diff\u001b[49m\u001b[43m(\u001b[49m\u001b[43mpull\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdiff_url\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 9\u001b[0m pull_user \u001b[38;5;241m=\u001b[39m pull\u001b[38;5;241m.\u001b[39muser\u001b[38;5;241m.\u001b[39mlogin\n\u001b[1;32m 10\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m pull\u001b[38;5;241m.\u001b[39mis_merged():\n", - "Input \u001b[0;32mIn [39]\u001b[0m, in \u001b[0;36mget_diff\u001b[0;34m(url)\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mget_diff\u001b[39m(url):\n\u001b[0;32m----> 2\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mrequests\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[43murl\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241m.\u001b[39mcontent\u001b[38;5;241m.\u001b[39mdecode()\n", - "File \u001b[0;32m~/miniconda3/envs/accelerate/lib/python3.9/site-packages/requests/api.py:73\u001b[0m, in \u001b[0;36mget\u001b[0;34m(url, params, **kwargs)\u001b[0m\n\u001b[1;32m 62\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mget\u001b[39m(url, params\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[1;32m 63\u001b[0m \u001b[38;5;124mr\u001b[39m\u001b[38;5;124;03m\"\"\"Sends a GET request.\u001b[39;00m\n\u001b[1;32m 64\u001b[0m \n\u001b[1;32m 65\u001b[0m \u001b[38;5;124;03m :param url: URL for the new :class:`Request` object.\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 70\u001b[0m \u001b[38;5;124;03m :rtype: requests.Response\u001b[39;00m\n\u001b[1;32m 71\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m---> 73\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mrequest\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mget\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m 
\u001b[49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mparams\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mparams\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/miniconda3/envs/accelerate/lib/python3.9/site-packages/requests/api.py:59\u001b[0m, in \u001b[0;36mrequest\u001b[0;34m(method, url, **kwargs)\u001b[0m\n\u001b[1;32m 55\u001b[0m \u001b[38;5;66;03m# By using the 'with' statement we are sure the session is closed, thus we\u001b[39;00m\n\u001b[1;32m 56\u001b[0m \u001b[38;5;66;03m# avoid leaving sockets open which can trigger a ResourceWarning in some\u001b[39;00m\n\u001b[1;32m 57\u001b[0m \u001b[38;5;66;03m# cases, and look like a memory leak in others.\u001b[39;00m\n\u001b[1;32m 58\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m sessions\u001b[38;5;241m.\u001b[39mSession() \u001b[38;5;28;01mas\u001b[39;00m session:\n\u001b[0;32m---> 59\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43msession\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrequest\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmethod\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmethod\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43murl\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/miniconda3/envs/accelerate/lib/python3.9/site-packages/requests/sessions.py:587\u001b[0m, in \u001b[0;36mSession.request\u001b[0;34m(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)\u001b[0m\n\u001b[1;32m 582\u001b[0m send_kwargs \u001b[38;5;241m=\u001b[39m {\n\u001b[1;32m 583\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtimeout\u001b[39m\u001b[38;5;124m\"\u001b[39m: timeout,\n\u001b[1;32m 584\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mallow_redirects\u001b[39m\u001b[38;5;124m\"\u001b[39m: allow_redirects,\n\u001b[1;32m 585\u001b[0m }\n\u001b[1;32m 586\u001b[0m send_kwargs\u001b[38;5;241m.\u001b[39mupdate(settings)\n\u001b[0;32m--> 587\u001b[0m resp \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msend\u001b[49m\u001b[43m(\u001b[49m\u001b[43mprep\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43msend_kwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 589\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m resp\n", - "File \u001b[0;32m~/miniconda3/envs/accelerate/lib/python3.9/site-packages/requests/sessions.py:701\u001b[0m, in \u001b[0;36mSession.send\u001b[0;34m(self, request, **kwargs)\u001b[0m\n\u001b[1;32m 698\u001b[0m start \u001b[38;5;241m=\u001b[39m preferred_clock()\n\u001b[1;32m 700\u001b[0m \u001b[38;5;66;03m# Send the request\u001b[39;00m\n\u001b[0;32m--> 701\u001b[0m r \u001b[38;5;241m=\u001b[39m \u001b[43madapter\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msend\u001b[49m\u001b[43m(\u001b[49m\u001b[43mrequest\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 703\u001b[0m \u001b[38;5;66;03m# Total elapsed time of the request (approximately)\u001b[39;00m\n\u001b[1;32m 
704\u001b[0m elapsed \u001b[38;5;241m=\u001b[39m preferred_clock() \u001b[38;5;241m-\u001b[39m start\n", - "File \u001b[0;32m~/miniconda3/envs/accelerate/lib/python3.9/site-packages/requests/adapters.py:489\u001b[0m, in \u001b[0;36mHTTPAdapter.send\u001b[0;34m(self, request, stream, timeout, verify, cert, proxies)\u001b[0m\n\u001b[1;32m 487\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 488\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m chunked:\n\u001b[0;32m--> 489\u001b[0m resp \u001b[38;5;241m=\u001b[39m \u001b[43mconn\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43murlopen\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 490\u001b[0m \u001b[43m \u001b[49m\u001b[43mmethod\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrequest\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmethod\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 491\u001b[0m \u001b[43m \u001b[49m\u001b[43murl\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 492\u001b[0m \u001b[43m \u001b[49m\u001b[43mbody\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrequest\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbody\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 493\u001b[0m \u001b[43m \u001b[49m\u001b[43mheaders\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrequest\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mheaders\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 494\u001b[0m \u001b[43m \u001b[49m\u001b[43mredirect\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 495\u001b[0m \u001b[43m \u001b[49m\u001b[43massert_same_host\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 496\u001b[0m \u001b[43m \u001b[49m\u001b[43mpreload_content\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 497\u001b[0m \u001b[43m \u001b[49m\u001b[43mdecode_content\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 498\u001b[0m \u001b[43m \u001b[49m\u001b[43mretries\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmax_retries\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 499\u001b[0m \u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtimeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 500\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 502\u001b[0m \u001b[38;5;66;03m# Send the request.\u001b[39;00m\n\u001b[1;32m 503\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 504\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mhasattr\u001b[39m(conn, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mproxy_pool\u001b[39m\u001b[38;5;124m\"\u001b[39m):\n", - "File \u001b[0;32m~/miniconda3/envs/accelerate/lib/python3.9/site-packages/urllib3/connectionpool.py:703\u001b[0m, in \u001b[0;36mHTTPConnectionPool.urlopen\u001b[0;34m(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)\u001b[0m\n\u001b[1;32m 700\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_prepare_proxy(conn)\n\u001b[1;32m 702\u001b[0m \u001b[38;5;66;03m# Make the request on the httplib connection object.\u001b[39;00m\n\u001b[0;32m--> 703\u001b[0m httplib_response 
\u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_make_request\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 704\u001b[0m \u001b[43m \u001b[49m\u001b[43mconn\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 705\u001b[0m \u001b[43m \u001b[49m\u001b[43mmethod\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 706\u001b[0m \u001b[43m \u001b[49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 707\u001b[0m \u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtimeout_obj\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 708\u001b[0m \u001b[43m \u001b[49m\u001b[43mbody\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mbody\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 709\u001b[0m \u001b[43m \u001b[49m\u001b[43mheaders\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mheaders\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 710\u001b[0m \u001b[43m \u001b[49m\u001b[43mchunked\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mchunked\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 711\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 713\u001b[0m \u001b[38;5;66;03m# If we're going to release the connection in ``finally:``, then\u001b[39;00m\n\u001b[1;32m 714\u001b[0m \u001b[38;5;66;03m# the response doesn't need to know about the connection. Otherwise\u001b[39;00m\n\u001b[1;32m 715\u001b[0m \u001b[38;5;66;03m# it will also try to release it and we'll have a double-release\u001b[39;00m\n\u001b[1;32m 716\u001b[0m \u001b[38;5;66;03m# mess.\u001b[39;00m\n\u001b[1;32m 717\u001b[0m response_conn \u001b[38;5;241m=\u001b[39m conn \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m release_conn \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n", - "File \u001b[0;32m~/miniconda3/envs/accelerate/lib/python3.9/site-packages/urllib3/connectionpool.py:449\u001b[0m, in \u001b[0;36mHTTPConnectionPool._make_request\u001b[0;34m(self, conn, method, url, timeout, chunked, **httplib_request_kw)\u001b[0m\n\u001b[1;32m 444\u001b[0m httplib_response \u001b[38;5;241m=\u001b[39m conn\u001b[38;5;241m.\u001b[39mgetresponse()\n\u001b[1;32m 445\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 446\u001b[0m \u001b[38;5;66;03m# Remove the TypeError from the exception chain in\u001b[39;00m\n\u001b[1;32m 447\u001b[0m \u001b[38;5;66;03m# Python 3 (including for exceptions like SystemExit).\u001b[39;00m\n\u001b[1;32m 448\u001b[0m \u001b[38;5;66;03m# Otherwise it looks like a bug in the code.\u001b[39;00m\n\u001b[0;32m--> 449\u001b[0m \u001b[43msix\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mraise_from\u001b[49m\u001b[43m(\u001b[49m\u001b[43me\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m)\u001b[49m\n\u001b[1;32m 450\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m (SocketTimeout, BaseSSLError, SocketError) \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 451\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_raise_timeout(err\u001b[38;5;241m=\u001b[39me, url\u001b[38;5;241m=\u001b[39murl, timeout_value\u001b[38;5;241m=\u001b[39mread_timeout)\n", - "File \u001b[0;32m:3\u001b[0m, in \u001b[0;36mraise_from\u001b[0;34m(value, from_value)\u001b[0m\n", - "File \u001b[0;32m~/miniconda3/envs/accelerate/lib/python3.9/site-packages/urllib3/connectionpool.py:444\u001b[0m, in 
\u001b[0;36mHTTPConnectionPool._make_request\u001b[0;34m(self, conn, method, url, timeout, chunked, **httplib_request_kw)\u001b[0m\n\u001b[1;32m 441\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m:\n\u001b[1;32m 442\u001b[0m \u001b[38;5;66;03m# Python 3\u001b[39;00m\n\u001b[1;32m 443\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 444\u001b[0m httplib_response \u001b[38;5;241m=\u001b[39m \u001b[43mconn\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgetresponse\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 445\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 446\u001b[0m \u001b[38;5;66;03m# Remove the TypeError from the exception chain in\u001b[39;00m\n\u001b[1;32m 447\u001b[0m \u001b[38;5;66;03m# Python 3 (including for exceptions like SystemExit).\u001b[39;00m\n\u001b[1;32m 448\u001b[0m \u001b[38;5;66;03m# Otherwise it looks like a bug in the code.\u001b[39;00m\n\u001b[1;32m 449\u001b[0m six\u001b[38;5;241m.\u001b[39mraise_from(e, \u001b[38;5;28;01mNone\u001b[39;00m)\n", - "File \u001b[0;32m~/miniconda3/envs/accelerate/lib/python3.9/http/client.py:1377\u001b[0m, in \u001b[0;36mHTTPConnection.getresponse\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 1375\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1376\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m-> 1377\u001b[0m \u001b[43mresponse\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbegin\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1378\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mConnectionError\u001b[39;00m:\n\u001b[1;32m 1379\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mclose()\n", - "File \u001b[0;32m~/miniconda3/envs/accelerate/lib/python3.9/http/client.py:320\u001b[0m, in \u001b[0;36mHTTPResponse.begin\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 318\u001b[0m \u001b[38;5;66;03m# read until we get a non-100 response\u001b[39;00m\n\u001b[1;32m 319\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28;01mTrue\u001b[39;00m:\n\u001b[0;32m--> 320\u001b[0m version, status, reason \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_read_status\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 321\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m status \u001b[38;5;241m!=\u001b[39m CONTINUE:\n\u001b[1;32m 322\u001b[0m \u001b[38;5;28;01mbreak\u001b[39;00m\n", - "File \u001b[0;32m~/miniconda3/envs/accelerate/lib/python3.9/http/client.py:281\u001b[0m, in \u001b[0;36mHTTPResponse._read_status\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 280\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_read_status\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n\u001b[0;32m--> 281\u001b[0m line \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mstr\u001b[39m(\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfp\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mreadline\u001b[49m\u001b[43m(\u001b[49m\u001b[43m_MAXLINE\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m+\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m)\u001b[49m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124miso-8859-1\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 282\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(line) \u001b[38;5;241m>\u001b[39m _MAXLINE:\n\u001b[1;32m 283\u001b[0m 
\u001b[38;5;28;01mraise\u001b[39;00m LineTooLong(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mstatus line\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n", - "File \u001b[0;32m~/miniconda3/envs/accelerate/lib/python3.9/socket.py:704\u001b[0m, in \u001b[0;36mSocketIO.readinto\u001b[0;34m(self, b)\u001b[0m\n\u001b[1;32m 702\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28;01mTrue\u001b[39;00m:\n\u001b[1;32m 703\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 704\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_sock\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrecv_into\u001b[49m\u001b[43m(\u001b[49m\u001b[43mb\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 705\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m timeout:\n\u001b[1;32m 706\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_timeout_occurred \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n", - "File \u001b[0;32m~/miniconda3/envs/accelerate/lib/python3.9/ssl.py:1241\u001b[0m, in \u001b[0;36mSSLSocket.recv_into\u001b[0;34m(self, buffer, nbytes, flags)\u001b[0m\n\u001b[1;32m 1237\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m flags \u001b[38;5;241m!=\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[1;32m 1238\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[1;32m 1239\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mnon-zero flags not allowed in calls to recv_into() on \u001b[39m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;241m%\u001b[39m\n\u001b[1;32m 1240\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m)\n\u001b[0;32m-> 1241\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mread\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnbytes\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbuffer\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1242\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 1243\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28msuper\u001b[39m()\u001b[38;5;241m.\u001b[39mrecv_into(buffer, nbytes, flags)\n", - "File \u001b[0;32m~/miniconda3/envs/accelerate/lib/python3.9/ssl.py:1099\u001b[0m, in \u001b[0;36mSSLSocket.read\u001b[0;34m(self, len, buffer)\u001b[0m\n\u001b[1;32m 1097\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1098\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m buffer \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m-> 1099\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_sslobj\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mread\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mlen\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbuffer\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1100\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 1101\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_sslobj\u001b[38;5;241m.\u001b[39mread(\u001b[38;5;28mlen\u001b[39m)\n", - "\u001b[0;31mKeyboardInterrupt\u001b[0m: " + "name": "stdout", + "output_type": "stream", + "text": [ + "0 out of 17\n", + "Data length: 29\n", + "1 out of 17\n", + "Data length: 53\n", + "2 out of 17\n", + "Data length: 79\n", + "3 out of 17\n", + "Data length: 
107\n", + "4 out of 17\n", + "Data length: 134\n", + "5 out of 17\n", + "Data length: 159\n", + "6 out of 17\n", + "Data length: 187\n", + "7 out of 17\n", + "Data length: 211\n", + "8 out of 17\n", + "Data length: 239\n", + "9 out of 17\n", + "Data length: 266\n", + "10 out of 17\n", + "Data length: 295\n", + "11 out of 17\n", + "Data length: 323\n", + "12 out of 17\n", + "Data length: 353\n", + "13 out of 17\n", + "Data length: 380\n", + "14 out of 17\n", + "Data length: 409\n", + "15 out of 17\n", + "Data length: 439\n", + "16 out of 17\n", + "Data length: 454\n" ] } ], @@ -136,6 +113,7 @@ "repo = gh.get_repo(\"huggingface/accelerate\")\n", "pulls = repo.get_pulls(state=\"closed\")\n", "total_pages = (pulls.totalCount // 30) + 1\n", + "total = pulls.totalCount\n", "for page_id in range(total_pages):\n", " page = pulls.get_page(page_id)\n", " for pull in page:\n", @@ -166,2394 +144,57 @@ " \"code_comments\":code_comments, \n", " \"context\":conversation,\n", " \"url\":pull.url\n", - " } " + " } \n", + " print(f\"{page_id} out of {total_pages}\")\n", + " print(f'Data length: {len(data)}')" ] }, { "cell_type": "code", - "execution_count": 149, + "execution_count": 164, "id": "16140ee6-88e7-4cdf-b215-2282f1034dcf", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "184" - ] - }, - "execution_count": 149, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "len(data)" - ] - }, - { - "cell_type": "code", - "execution_count": 150, - "id": "7763a1aa-727c-41c9-b348-a4adaaca8445", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{1188526483: {'diff': 'diff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\\nindex c58cfeb50..679e57ff2 100644\\n--- a/src/accelerate/utils/modeling.py\\n+++ b/src/accelerate/utils/modeling.py\\n@@ -666,7 +666,7 @@ def load_checkpoint_in_model(\\n elif len(potential_index) == 1:\\n index_filename = os.path.join(checkpoint, potential_index[0])\\n else:\\n- raise ValueError(f\"{checkpoint} containing mote than one `.index.json` file, delete the irrelevant ones.\")\\n+ raise ValueError(f\"{checkpoint} containing more than one `.index.json` file, delete the irrelevant ones.\")\\n else:\\n raise ValueError(\\n \"`checkpoint` should be the path to a file containing a whole state dict, or the index of a sharded \"\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/955'},\n", - " 1183897941: {'diff': 'diff --git a/src/accelerate/big_modeling.py b/src/accelerate/big_modeling.py\\nindex d3247a464..75a2c06f6 100644\\n--- a/src/accelerate/big_modeling.py\\n+++ b/src/accelerate/big_modeling.py\\n@@ -404,6 +404,7 @@ def load_checkpoint_and_dispatch(\\n offload_folder=offload_folder,\\n dtype=dtype,\\n offload_state_dict=offload_state_dict,\\n+ offload_buffers=offload_buffers,\\n )\\n if device_map is None:\\n return model\\ndiff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\\nindex 2774b18ce..c58cfeb50 100644\\n--- a/src/accelerate/utils/modeling.py\\n+++ b/src/accelerate/utils/modeling.py\\n@@ -607,6 +607,7 @@ def load_checkpoint_in_model(\\n offload_folder: Optional[Union[str, os.PathLike]] = None,\\n dtype: Optional[Union[str, torch.dtype]] = None,\\n offload_state_dict: bool = False,\\n+ offload_buffers: bool = False,\\n ):\\n \"\"\"\\n Loads a (potentially sharded) 
checkpoint inside a model, potentially sending weights to a given device as they are\\n@@ -636,6 +637,8 @@ def load_checkpoint_in_model(\\n offload_state_dict (`bool`, *optional*, defaults to `False`):\\n If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if\\n the weight of the CPU state dict + the biggest shard does not fit.\\n+ offload_buffers (`bool`, *optional*, defaults to `False):\\n+ Whether or not to include the buffers in the weights offloaded to disk.\\n \"\"\"\\n if offload_folder is None and device_map is not None and \"disk\" in device_map.values():\\n raise ValueError(\\n@@ -687,6 +690,8 @@ def load_checkpoint_in_model(\\n state_dict_folder = tempfile.mkdtemp()\\n state_dict_index = {}\\n \\n+ buffer_names = [name for name, _ in model.named_buffers()]\\n+\\n for checkpoint_file in checkpoint_files:\\n checkpoint = torch.load(checkpoint_file)\\n if device_map is None:\\n@@ -703,7 +708,8 @@ def load_checkpoint_in_model(\\n param_device = device_map[module_name]\\n \\n if param_device == \"disk\":\\n- set_module_tensor_to_device(model, param_name, \"meta\")\\n+ if offload_buffers or param_name not in buffer_names:\\n+ set_module_tensor_to_device(model, param_name, \"meta\")\\n offload_weight(param, param_name, offload_folder, index=offload_index)\\n elif param_device == \"cpu\" and offload_state_dict:\\n set_module_tensor_to_device(model, param_name, \"meta\")\\ndiff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py\\nindex 16243d5e1..644d297b2 100644\\n--- a/tests/test_modeling_utils.py\\n+++ b/tests/test_modeling_utils.py\\n@@ -275,6 +275,31 @@ def test_load_checkpoint_in_model_one_gpu(self):\\n self.assertEqual(model.batchnorm.weight.device, torch.device(\"cpu\"))\\n self.assertEqual(model.linear2.weight.device, torch.device(\"cpu\"))\\n \\n+ @require_cuda\\n+ def test_load_checkpoint_in_model_disk_offload(self):\\n+ device_map = {\"linear1\": \"cpu\", \"batchnorm\": \"disk\", \"linear2\": \"cpu\"}\\n+\\n+ model = ModelForTest()\\n+ with tempfile.TemporaryDirectory() as tmp_dir:\\n+ fname = os.path.join(tmp_dir, \"pt_model.bin\")\\n+ torch.save(model.state_dict(), fname)\\n+ load_checkpoint_in_model(model, fname, device_map=device_map, offload_folder=tmp_dir)\\n+ self.assertEqual(model.linear1.weight.device, torch.device(\"cpu\"))\\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"meta\"))\\n+ # Buffers are not offloaded by default\\n+ self.assertEqual(model.batchnorm.running_mean.device, torch.device(\"cpu\"))\\n+ self.assertEqual(model.linear2.weight.device, torch.device(\"cpu\"))\\n+\\n+ model = ModelForTest()\\n+ with tempfile.TemporaryDirectory() as tmp_dir:\\n+ fname = os.path.join(tmp_dir, \"pt_model.bin\")\\n+ torch.save(model.state_dict(), fname)\\n+ load_checkpoint_in_model(model, fname, device_map=device_map, offload_folder=tmp_dir, offload_buffers=True)\\n+ self.assertEqual(model.linear1.weight.device, torch.device(\"cpu\"))\\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"meta\"))\\n+ self.assertEqual(model.batchnorm.running_mean.device, torch.device(\"meta\"))\\n+ self.assertEqual(model.linear2.weight.device, torch.device(\"cpu\"))\\n+\\n @require_multi_gpu\\n def test_load_checkpoint_in_model_two_gpu(self):\\n device_map = {\"linear1\": 0, \"batchnorm\": \"cpu\", \"linear2\": 1}\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - 
" 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/951'},\n", - " 1183653396: {'diff': 'diff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py\\nindex fceb0bd23..03a4e346c 100644\\n--- a/tests/deepspeed/test_deepspeed.py\\n+++ b/tests/deepspeed/test_deepspeed.py\\n@@ -243,10 +243,7 @@ def test_deepspeed_plugin(self, stage):\\n \\n @parameterized.expand([FP16, BF16], name_func=parameterized_custom_name_func)\\n def test_accelerate_state_deepspeed(self, dtype):\\n- state = AcceleratorState(_from_accelerator=True)\\n- if state.initialized:\\n- state.initialized = False\\n-\\n+ AcceleratorState._reset_state()\\n deepspeed_plugin = DeepSpeedPlugin(\\n gradient_accumulation_steps=1,\\n gradient_clipping=1.0,\\n@@ -259,7 +256,6 @@ def test_accelerate_state_deepspeed(self, dtype):\\n with mockenv_context(**self.dist_env):\\n state = Accelerator(mixed_precision=dtype, deepspeed_plugin=deepspeed_plugin).state\\n self.assertTrue(state.deepspeed_plugin.deepspeed_config[dtype][\"enabled\"])\\n- state.initialized = False\\n \\n def test_init_zero3(self):\\n deepspeed_plugin = DeepSpeedPlugin(\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/950'},\n", - " 1183552649: {'diff': 'diff --git a/src/accelerate/state.py b/src/accelerate/state.py\\nindex 31a6d64a2..d97327de4 100644\\n--- a/src/accelerate/state.py\\n+++ b/src/accelerate/state.py\\n@@ -35,6 +35,14 @@\\n import torch_xla.core.xla_model as xm\\n \\n \\n+def is_initialized() -> bool:\\n+ \"\"\"\\n+ Checks if the `AcceleratorState` has been initialized from `Accelerator`. 
Same as `AcceleratorState.initialized`,\\n+ but works as a module method.\\n+ \"\"\"\\n+ return AcceleratorState._shared_state != {}\\n+\\n+\\n # Inspired by Alex Martelli\\'s \\'Borg\\'.\\n class AcceleratorState:\\n \"\"\"\\n@@ -45,6 +53,7 @@ class AcceleratorState:\\n - **device** (`torch.device`) -- The device to use.\\n - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently\\n in use.\\n+ - **initialized** (`bool`) -- Whether or not the `AcceleratorState` has been initialized from `Accelerator`.\\n - **local_process_index** (`int`) -- The index of the current process on the current server.\\n - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type\\n of mixed precision being performed.\\n@@ -69,8 +78,7 @@ def __init__(\\n if parse_flag_from_env(\"ACCELERATE_USE_CPU\"):\\n cpu = True\\n self._check_initialized(mixed_precision, cpu)\\n- self.fork_launched = parse_flag_from_env(\"FORK_LAUNCHED\", 0)\\n- if not getattr(self, \"initialized\", False):\\n+ if not self.initialized:\\n self.backend = None\\n self.deepspeed_plugin = None\\n mixed_precision = (\\n@@ -245,18 +253,17 @@ def __init__(\\n and self.device.type == \"cuda\"\\n ):\\n torch.backends.cuda.matmul.allow_tf32 = True\\n- self.initialized = True\\n \\n- def __repr__(self):\\n- mixed_precision = self.mixed_precision\\n+ self.fork_launched = parse_flag_from_env(\"FORK_LAUNCHED\", 0)\\n \\n+ def __repr__(self):\\n repr = (\\n f\"Distributed environment: {self.distributed_type}{(\\' Backend: \\' + self.backend) if self.backend else \\'\\'}\\\\n\"\\n f\"Num processes: {self.num_processes}\\\\n\"\\n f\"Process index: {self.process_index}\\\\n\"\\n f\"Local process index: {self.local_process_index}\\\\n\"\\n f\"Device: {self.device}\\\\n\"\\n- f\"Mixed precision type: {mixed_precision}\\\\n\"\\n+ f\"Mixed precision type: {self.mixed_precision}\\\\n\"\\n )\\n if self.distributed_type == DistributedType.DEEPSPEED:\\n repr += f\"ds_config: {self.deepspeed_plugin.deepspeed_config}\\\\n\"\\n@@ -286,9 +293,14 @@ def _reset_state():\\n \"Resets `_shared_state`, is used internally and should not be called\"\\n AcceleratorState._shared_state = {}\\n \\n+ @property\\n+ def initialized(self) -> bool:\\n+ \"Returns whether the `AcceleratorState` has been initialized\"\\n+ return self._shared_state != {}\\n+\\n def _check_initialized(self, mixed_precision=None, cpu=None):\\n \"Checks if a modification is trying to be made and the `AcceleratorState` has already been initialized\"\\n- if getattr(self, \"initialized\", False):\\n+ if self.initialized:\\n err = \"AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `{flag}` to `Accelerate()`.\"\\n if cpu and self.device.type != \"cpu\":\\n raise ValueError(err.format(flag=\"cpu=True\"))\\n@@ -311,11 +323,15 @@ class GradientState:\\n \\n def __init__(self):\\n self.__dict__ = self._shared_state\\n- if not getattr(self, \"initialized\", False):\\n+ if not self.initialized:\\n self.sync_gradients = True\\n self.end_of_dataloader = False\\n self.remainder = -1\\n- self.initialized = True\\n+\\n+ @property\\n+ def initialized(self) -> bool:\\n+ \"Returns whether the `GradientState` has been initialized\"\\n+ return GradientState._shared_state != {}\\n \\n def __repr__(self):\\n return (\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 
'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/949'},\n", - " 1178312394: {'diff': 'diff --git a/src/accelerate/utils/offload.py b/src/accelerate/utils/offload.py\\nindex 22f51cce1..84c115fd3 100644\\n--- a/src/accelerate/utils/offload.py\\n+++ b/src/accelerate/utils/offload.py\\n@@ -34,7 +34,7 @@ def offload_weight(weight, weight_name, offload_folder, index=None):\\n # Need to reinterpret the underlined data as int16 since NumPy does not handle bfloat16s.\\n weight = weight.view(torch.int16)\\n dtype = \"bfloat16\"\\n- array = weight.numpy()\\n+ array = weight.cpu().numpy()\\n tensor_file = os.path.join(offload_folder, f\"{weight_name}.dat\")\\n if index is not None:\\n if dtype is None:\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/945'},\n", - " 1176583667: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex ec5041bfa..7c07e9105 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -363,7 +363,7 @@ def __init__(\\n if (\\n self.state.mixed_precision == \"fp16\"\\n and self.device.type != \"cpu\"\\n- and self.distributed_type != DistributedType.MEGATRON_LM\\n+ and self.distributed_type not in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM)\\n ):\\n self.native_amp = True\\n if not torch.cuda.is_available() and not parse_flag_from_env(\"ACCELERATE_USE_MPS_DEVICE\"):\\n@@ -375,10 +375,10 @@ def __init__(\\n self.scaler = ShardedGradScaler(**kwargs)\\n else:\\n self.scaler = torch.cuda.amp.GradScaler(**kwargs)\\n- elif (\\n- self.state.mixed_precision == \"bf16\"\\n- and self.distributed_type != DistributedType.FSDP\\n- and self.distributed_type != DistributedType.MEGATRON_LM\\n+ elif self.state.mixed_precision == \"bf16\" and self.distributed_type not in (\\n+ DistributedType.DEEPSPEED,\\n+ DistributedType.FSDP,\\n+ DistributedType.MEGATRON_LM,\\n ):\\n if self.device.type == \"cpu\":\\n self.native_amp = is_torch_version(\">=\", \"1.10\")\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/943'},\n", - " 1176562235: {'diff': 'diff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\\nindex 7afebea48..379d6454e 100644\\n--- a/src/accelerate/tracking.py\\n+++ b/src/accelerate/tracking.py\\n@@ -37,7 +37,10 @@\\n _available_trackers = []\\n \\n if is_tensorboard_available():\\n- from torch.utils import tensorboard\\n+ try:\\n+ from torch.utils import tensorboard\\n+ except ModuleNotFoundError:\\n+ import tensorboardX as tensorboard\\n \\n _available_trackers.append(LoggerType.TENSORBOARD)\\n \\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/942'},\n", - " 1176040949: {'diff': 'diff --git a/docs/source/usage_guides/deepspeed.mdx b/docs/source/usage_guides/deepspeed.mdx\\nindex 29561c77b..0377296c1 100644\\n--- a/docs/source/usage_guides/deepspeed.mdx\\n+++ b/docs/source/usage_guides/deepspeed.mdx\\n@@ -395,6 +395,196 @@ We will look at the changes needed in the 
code when using these.\\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. \\n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\\n \\n+**Things to note when using DeepSpeed Config File**\\n+\\n+Below is a sample script using `deepspeed_config_file` in different scenarios.\\n+\\n+Code `test.py`:\\n+\\n+```python\\n+from accelerate import Accelerator\\n+from accelerate.state import AcceleratorState\\n+\\n+\\n+def main():\\n+ accelerator = Accelerator()\\n+ accelerator.print(f\"{AcceleratorState()}\")\\n+\\n+\\n+if __name__ == \"__main__\":\\n+ main()\\n+```\\n+\\n+**Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\\n+\\n+1. Content of the `accelerate` config:\\n+\\n+```yaml\\n+command_file: null\\n+commands: null\\n+compute_environment: LOCAL_MACHINE\\n+deepspeed_config:\\n+ gradient_accumulation_steps: 1\\n+ gradient_clipping: 1.0\\n+ offload_optimizer_device: \\'cpu\\'\\n+ offload_param_device: \\'cpu\\'\\n+ zero3_init_flag: true\\n+ zero3_save_16bit_model: true\\n+ zero_stage: 3\\n+ deepspeed_config_file: \\'ds_config.json\\'\\n+distributed_type: DEEPSPEED\\n+downcast_bf16: \\'no\\'\\n+dynamo_backend: \\'NO\\'\\n+fsdp_config: {}\\n+gpu_ids: null\\n+machine_rank: 0\\n+main_process_ip: null\\n+main_process_port: null\\n+main_training_function: main\\n+megatron_lm_config: {}\\n+num_machines: 1\\n+num_processes: 2\\n+rdzv_backend: static\\n+same_network: true\\n+tpu_name: null\\n+tpu_zone: null\\n+use_cpu: false\\n+```\\n+\\n+2. `ds_config.json`:\\n+\\n+```json\\n+{\\n+ \"bf16\": {\\n+ \"enabled\": true\\n+ },\\n+ \"zero_optimization\": {\\n+ \"stage\": 3,\\n+ \"stage3_gather_16bit_weights_on_model_save\": false,\\n+ \"offload_optimizer\": {\\n+ \"device\": \"none\"\\n+ },\\n+ \"offload_param\": {\\n+ \"device\": \"none\"\\n+ }\\n+ },\\n+ \"gradient_clipping\": 1.0,\\n+ \"train_batch_size\": \"auto\",\\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\\n+ \"gradient_accumulation_steps\": 10,\\n+ \"steps_per_print\": 2000000\\n+}\\n+```\\n+\\n+3. Output of `accelerate launch test.py`:\\n+\\n+```bash\\n+ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored: \\n+[\\'gradient_accumulation_steps\\', \\'gradient_clipping\\', \\'zero_stage\\', \\'offload_optimizer_device\\', \\'offload_param_device\\', \\n+\\'zero3_save_16bit_model\\', \\'mixed_precision\\'].\\n+Please specify them appropriately in the DeepSpeed config file.\\n+If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\\n+The easiest method is to create a new config following the questionnaire via `accelerate config`.\\n+It will only ask for the necessary config variables when using `deepspeed_config_file`.\\n+```\\n+\\n+**Scenario 2**: Use the solution of the error to create new accelerate config and check that no ambiguity error is now thrown.\\n+\\n+1. Run `accelerate config`:\\n+\\n+```bash\\n+$ accelerate config\\n+-------------------------------------------------------------------------------------------------------------------------------\\n+In which compute environment are you running?\\n+This machine \\n+-------------------------------------------------------------------------------------------------------------------------------\\n+Which type of machine are you using? 
\\n+multi-GPU \\n+How many different machines will you use (use more than 1 for multi-node training)? [1]: \\n+Do you wish to optimize your script with torch dynamo?[yes/NO]: \\n+Do you want to use DeepSpeed? [yes/NO]: yes \\n+Do you want to specify a json file to a DeepSpeed config? [yes/NO]: yes \\n+Please enter the path to the json DeepSpeed config file: ds_config.json \\n+Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: yes\\n+How many GPU(s) should be used for distributed training? [1]:4\\n+accelerate configuration saved at ds_config_sample.yaml\\n+```\\n+\\n+2. Content of the `accelerate` config:\\n+\\n+```yaml\\n+compute_environment: LOCAL_MACHINE\\n+deepspeed_config:\\n+ deepspeed_config_file: ds_config.json\\n+ zero3_init_flag: true\\n+distributed_type: DEEPSPEED\\n+downcast_bf16: \\'no\\'\\n+dynamo_backend: \\'NO\\'\\n+fsdp_config: {}\\n+machine_rank: 0\\n+main_training_function: main\\n+megatron_lm_config: {}\\n+num_machines: 1\\n+num_processes: 4\\n+rdzv_backend: static\\n+same_network: true\\n+use_cpu: false\\n+```\\n+\\n+3. Output of `accelerate launch test.py`:\\n+\\n+```bash\\n+Distributed environment: DEEPSPEED Backend: nccl\\n+Num processes: 4\\n+Process index: 0\\n+Local process index: 0\\n+Device: cuda:0\\n+Mixed precision type: bf16\\n+ds_config: {\\'bf16\\': {\\'enabled\\': True}, \\'zero_optimization\\': {\\'stage\\': 3, \\'stage3_gather_16bit_weights_on_model_save\\': False, \\'offload_optimizer\\': {\\'device\\': \\'none\\'}, \\'offload_param\\': {\\'device\\': \\'none\\'}}, \\'gradient_clipping\\': 1.0, \\'train_batch_size\\': \\'auto\\', \\'train_micro_batch_size_per_gpu\\': \\'auto\\', \\'gradient_accumulation_steps\\': 10, \\'steps_per_print\\': inf, \\'fp16\\': {\\'enabled\\': False}}\\n+```\\n+\\n+**Scenario 3**: Setting the `accelerate launch` command arguments related to DeepSpeed as `\"auto\"` in the DeepSpeed` configuration file and check that things work as expected.\\n+\\n+1. New `ds_config.json` with `\"auto\"` for the `accelerate launch` DeepSpeed command arguments:\\n+\\n+```json\\n+{\\n+ \"bf16\": {\\n+ \"enabled\": \"auto\"\\n+ },\\n+ \"zero_optimization\": {\\n+ \"stage\": \"auto\",\\n+ \"stage3_gather_16bit_weights_on_model_save\": \"auto\",\\n+ \"offload_optimizer\": {\\n+ \"device\": \"auto\"\\n+ },\\n+ \"offload_param\": {\\n+ \"device\": \"auto\"\\n+ }\\n+ },\\n+ \"gradient_clipping\": \"auto\",\\n+ \"train_batch_size\": \"auto\",\\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\\n+ \"gradient_accumulation_steps\": \"auto\",\\n+ \"steps_per_print\": 2000000\\n+}\\n+```\\n+\\n+2. 
Output of `accelerate launch --mixed_precision=\"fp16\" --zero_stage=3 --gradient_accumulation_steps=5 --gradient_clipping=1.0 --offload_param_device=\"cpu\" --offload_optimizer_device=\"nvme\" --zero3_save_16bit_model=\"true\" test.py`:\\n+\\n+```bash\\n+Distributed environment: DEEPSPEED Backend: nccl\\n+Num processes: 4\\n+Process index: 0\\n+Local process index: 0\\n+Device: cuda:0\\n+Mixed precision type: fp16\\n+ds_config: {\\'bf16\\': {\\'enabled\\': False}, \\'zero_optimization\\': {\\'stage\\': 3, \\'stage3_gather_16bit_weights_on_model_save\\': True, \\'offload_optimizer\\': {\\'device\\': \\'nvme\\'}, \\'offload_param\\': {\\'device\\': \\'cpu\\'}}, \\'gradient_clipping\\': 1.0, \\'train_batch_size\\': \\'auto\\', \\'train_micro_batch_size_per_gpu\\': \\'auto\\', \\'gradient_accumulation_steps\\': 5, \\'steps_per_print\\': inf, \\'fp16\\': {\\'enabled\\': True, \\'auto_cast\\': True}}\\n+```\\n+\\n+**Note**: Remaining `\"auto\"` values are handled in `accelerator.prepare()` call as explained in point 2 of \\n+`Important code changes when using DeepSpeed Config File`.\\n+\\n ## Saving and loading\\n \\n 1. Saving and loading of models is unchanged for ZeRO Stage-1 and Stage-2.\\ndiff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\\nindex a3dcd2dcb..50acf55a1 100644\\n--- a/src/accelerate/commands/config/cluster.py\\n+++ b/src/accelerate/commands/config/cluster.py\\n@@ -460,7 +460,7 @@ def get_cluster_input():\\n \\n if distributed_type != DistributedType.TPU:\\n if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config:\\n- mixed_precision = \"no\"\\n+ mixed_precision = None\\n else:\\n mixed_precision = _ask_options(\\n \"Do you wish to use FP16 or BF16 (mixed precision)?\",\\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\\nindex ba492802e..8b4a28292 100644\\n--- a/src/accelerate/commands/config/config_args.py\\n+++ b/src/accelerate/commands/config/config_args.py\\n@@ -78,6 +78,7 @@ def to_dict(self):\\n for key, value in result.items():\\n if isinstance(value, Enum):\\n result[key] = value.value\\n+ result = {k: v for k, v in result.items() if v is not None}\\n return result\\n \\n @classmethod\\n@@ -88,7 +89,7 @@ def from_json_file(cls, json_file=None):\\n if \"compute_environment\" not in config_dict:\\n config_dict[\"compute_environment\"] = ComputeEnvironment.LOCAL_MACHINE\\n if \"mixed_precision\" not in config_dict:\\n- config_dict[\"mixed_precision\"] = \"fp16\" if (\"fp16\" in config_dict and config_dict[\"fp16\"]) else \"no\"\\n+ config_dict[\"mixed_precision\"] = \"fp16\" if (\"fp16\" in config_dict and config_dict[\"fp16\"]) else None\\n if \"fp16\" in config_dict: # Convert the config to the new format.\\n del config_dict[\"fp16\"]\\n if \"use_cpu\" not in config_dict:\\n@@ -111,7 +112,7 @@ def from_yaml_file(cls, yaml_file=None):\\n config_dict[\"compute_environment\"] = ComputeEnvironment.LOCAL_MACHINE\\n \\n if \"mixed_precision\" not in config_dict:\\n- config_dict[\"mixed_precision\"] = \"fp16\" if (\"fp16\" in config_dict and config_dict[\"fp16\"]) else \"no\"\\n+ config_dict[\"mixed_precision\"] = \"fp16\" if (\"fp16\" in config_dict and config_dict[\"fp16\"]) else None\\n if \"fp16\" in config_dict: # Convert the config to the new format.\\n del config_dict[\"fp16\"]\\n if \"use_cpu\" not in config_dict:\\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex 91d4427ac..b5f831b47 100644\\n--- 
a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -301,45 +301,50 @@ def launch_command_parser(subparsers=None):\\n \"--zero_stage\",\\n default=None,\\n type=int,\\n- help=\"DeepSpeed\\'s ZeRO optimization stage (useful only when `use_deepspeed` flag is passed).\",\\n+ help=\"DeepSpeed\\'s ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). \"\\n+ \"If unspecified, will default to `2`.\",\\n )\\n deepspeed_args.add_argument(\\n \"--offload_optimizer_device\",\\n default=None,\\n type=str,\\n- help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed).\",\\n+ help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). \"\\n+ \"If unspecified, will default to \\'none\\'.\",\\n )\\n deepspeed_args.add_argument(\\n \"--offload_param_device\",\\n default=None,\\n type=str,\\n- help=\"Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed).\",\\n+ help=\"Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed). \"\\n+ \"If unspecified, will default to \\'none\\'.\",\\n )\\n deepspeed_args.add_argument(\\n \"--gradient_accumulation_steps\",\\n default=None,\\n type=int,\\n- help=\"No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed).\",\\n+ help=\"No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed). \"\\n+ \"If unspecified, will default to `1`.\",\\n )\\n deepspeed_args.add_argument(\\n \"--gradient_clipping\",\\n default=None,\\n type=float,\\n- help=\"gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed).\",\\n+ help=\"gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed). \"\\n+ \"If unspecified, will default to `1.0`.\",\\n )\\n deepspeed_args.add_argument(\\n \"--zero3_init_flag\",\\n- default=\"true\",\\n+ default=None,\\n type=str,\\n help=\"Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. \"\\n- \"Only applicable with DeepSpeed ZeRO Stage-3.\",\\n+ \"Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `true`.\",\\n )\\n deepspeed_args.add_argument(\\n \"--zero3_save_16bit_model\",\\n- default=\"false\",\\n+ default=None,\\n type=str,\\n help=\"Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. \"\\n- \"Only applicable with DeepSpeed ZeRO Stage-3.\",\\n+ \"Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `false`.\",\\n )\\n deepspeed_args.add_argument(\\n \"--deepspeed_hostfile\",\\n@@ -363,7 +368,7 @@ def launch_command_parser(subparsers=None):\\n \"--deepspeed_multinode_launcher\",\\n default=None,\\n type=str,\\n- help=\"DeepSpeed multi-node launcher to use.\",\\n+ help=\"DeepSpeed multi-node launcher to use. 
If unspecified, will default to `pdsh`.\",\\n )\\n \\n # fsdp arguments\\n@@ -717,14 +722,22 @@ def deepspeed_launcher(args):\\n \\n current_env[\"PYTHONPATH\"] = env_var_path_add(\"PYTHONPATH\", os.path.abspath(\".\"))\\n current_env[\"ACCELERATE_MIXED_PRECISION\"] = str(mixed_precision)\\n+ current_env[\"ACCELERATE_CONFIG_DS_FIELDS\"] = str(args.deepspeed_fields_from_accelerate_config).lower()\\n current_env[\"ACCELERATE_USE_DEEPSPEED\"] = \"true\"\\n- current_env[\"ACCELERATE_DEEPSPEED_ZERO_STAGE\"] = str(args.zero_stage)\\n- current_env[\"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\"] = str(args.gradient_accumulation_steps)\\n- current_env[\"ACCELERATE_GRADIENT_CLIPPING\"] = str(args.gradient_clipping).lower()\\n- current_env[\"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\"] = str(args.offload_optimizer_device).lower()\\n- current_env[\"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\"] = str(args.offload_param_device).lower()\\n- current_env[\"ACCELERATE_DEEPSPEED_ZERO3_INIT\"] = str(args.zero3_init_flag).lower()\\n- current_env[\"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\"] = str(args.zero3_save_16bit_model).lower()\\n+ if args.zero_stage is not None:\\n+ current_env[\"ACCELERATE_DEEPSPEED_ZERO_STAGE\"] = str(args.zero_stage)\\n+ if args.gradient_accumulation_steps is not None:\\n+ current_env[\"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\"] = str(args.gradient_accumulation_steps)\\n+ if args.gradient_clipping is not None:\\n+ current_env[\"ACCELERATE_GRADIENT_CLIPPING\"] = str(args.gradient_clipping).lower()\\n+ if args.offload_optimizer_device is not None:\\n+ current_env[\"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\"] = str(args.offload_optimizer_device).lower()\\n+ if args.offload_param_device is not None:\\n+ current_env[\"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\"] = str(args.offload_param_device).lower()\\n+ if args.zero3_init_flag is not None:\\n+ current_env[\"ACCELERATE_DEEPSPEED_ZERO3_INIT\"] = str(args.zero3_init_flag).lower()\\n+ if args.zero3_save_16bit_model is not None:\\n+ current_env[\"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\"] = str(args.zero3_save_16bit_model).lower()\\n if args.deepspeed_config_file is not None:\\n current_env[\"ACCELERATE_DEEPSPEED_CONFIG_FILE\"] = str(args.deepspeed_config_file)\\n \\n@@ -966,6 +979,7 @@ def launch_command(args):\\n \\n defaults = None\\n warned = []\\n+ mp_from_config_flag = False\\n # Get the default from the config file.\\n if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu:\\n defaults = load_config_from_file(args.config_file)\\n@@ -1013,7 +1027,12 @@ def launch_command(args):\\n ):\\n setattr(args, name, attr)\\n if not args.mixed_precision:\\n- args.mixed_precision = defaults.mixed_precision\\n+ if defaults.mixed_precision is None:\\n+ args.mixed_precision = \"no\"\\n+ else:\\n+ args.mixed_precision = defaults.mixed_precision\\n+ mp_from_config_flag = True\\n+\\n if args.dynamo_backend is None:\\n warned.append(\"\\\\t`--dynamo_backend` was set to a value of `\\'no\\'`\")\\n args.dynamo_backend = \"no\"\\n@@ -1056,6 +1075,10 @@ def launch_command(args):\\n \\n # Use the proper launcher\\n if args.use_deepspeed and not args.cpu:\\n+ args.deepspeed_fields_from_accelerate_config = list(defaults.deepspeed_config.keys()) if defaults else []\\n+ if mp_from_config_flag:\\n+ args.deepspeed_fields_from_accelerate_config.append(\"mixed_precision\")\\n+ args.deepspeed_fields_from_accelerate_config = \",\".join(args.deepspeed_fields_from_accelerate_config)\\n deepspeed_launcher(args)\\n elif 
args.use_fsdp and not args.cpu:\\n multi_gpu_launcher(args)\\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\\nindex 5a87d7860..01f174a10 100644\\n--- a/src/accelerate/utils/dataclasses.py\\n+++ b/src/accelerate/utils/dataclasses.py\\n@@ -394,6 +394,28 @@ class DeepSpeedPlugin:\\n def __post_init__(self):\\n from .deepspeed import HfDeepSpeedConfig\\n \\n+ if self.gradient_accumulation_steps is None:\\n+ self.gradient_accumulation_steps = int(os.environ.get(\"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\", 1))\\n+\\n+ if self.gradient_clipping is None:\\n+ gradient_clipping = os.environ.get(\"ACCELERATE_GRADIENT_CLIPPING\", \"none\")\\n+ if gradient_clipping != \"none\":\\n+ self.gradient_clipping = float(gradient_clipping)\\n+\\n+ if self.zero_stage is None:\\n+ self.zero_stage = int(os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO_STAGE\", 2))\\n+\\n+ if self.offload_optimizer_device is None:\\n+ self.offload_optimizer_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\\n+\\n+ if self.offload_param_device is None:\\n+ self.offload_param_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\", \"none\")\\n+\\n+ if self.zero3_save_16bit_model is None:\\n+ self.zero3_save_16bit_model = (\\n+ os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\", \"false\") == \"true\"\\n+ )\\n+\\n if self.hf_ds_config is None:\\n self.hf_ds_config = os.environ.get(\"ACCELERATE_DEEPSPEED_CONFIG_FILE\", \"none\")\\n if (\\n@@ -405,33 +427,22 @@ def __post_init__(self):\\n self.hf_ds_config = HfDeepSpeedConfig(self.hf_ds_config)\\n if \"gradient_accumulation_steps\" not in self.hf_ds_config.config:\\n self.hf_ds_config.config[\"gradient_accumulation_steps\"] = 1\\n- elif self.hf_ds_config.config[\"gradient_accumulation_steps\"] == \"auto\":\\n- raise ValueError(\"gradient_accumulation_steps cannot be set to \\'auto\\' in the DeepSpeed config.\")\\n if \"zero_optimization\" not in self.hf_ds_config.config:\\n raise ValueError(\"Please specify the ZeRO optimization config in the DeepSpeed config.\")\\n- else:\\n- if self.gradient_accumulation_steps is None:\\n- self.gradient_accumulation_steps = int(os.environ.get(\"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\", 1))\\n-\\n- if self.gradient_clipping is None:\\n- gradient_clipping = os.environ.get(\"ACCELERATE_GRADIENT_CLIPPING\", \"none\")\\n- if gradient_clipping != \"none\":\\n- self.gradient_clipping = float(gradient_clipping)\\n-\\n- if self.zero_stage is None:\\n- self.zero_stage = int(os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO_STAGE\", 2))\\n-\\n- if self.offload_optimizer_device is None:\\n- self.offload_optimizer_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\\n-\\n- if self.offload_param_device is None:\\n- self.offload_param_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\", \"none\")\\n-\\n- if self.zero3_save_16bit_model is None:\\n- self.zero3_save_16bit_model = (\\n- os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\", \"false\") == \"true\"\\n- )\\n \\n+ self._deepspeed_config_checks()\\n+ kwargs = {\\n+ \"gradient_accumulation_steps\": self.gradient_accumulation_steps,\\n+ \"gradient_clipping\": self.gradient_clipping if self.gradient_clipping else 1.0,\\n+ \"zero_optimization.stage\": self.zero_stage,\\n+ \"zero_optimization.offload_optimizer.device\": self.offload_optimizer_device,\\n+ \"zero_optimization.offload_param.device\": self.offload_param_device,\\n+ 
\"zero_optimization.stage3_gather_16bit_weights_on_model_save\": self.zero3_save_16bit_model,\\n+ }\\n+ for key in kwargs.keys():\\n+ self.fill_match(key, **kwargs, must_match=False)\\n+ self.hf_ds_config.set_stage_and_offload()\\n+ else:\\n config = {\\n \"train_batch_size\": \"auto\",\\n \"train_micro_batch_size_per_gpu\": \"auto\",\\n@@ -450,15 +461,19 @@ def __post_init__(self):\\n if self.gradient_clipping:\\n config[\"gradient_clipping\"] = self.gradient_clipping\\n self.hf_ds_config = HfDeepSpeedConfig(config)\\n+\\n self.deepspeed_config = self.hf_ds_config.config\\n self.deepspeed_config[\"steps_per_print\"] = float(\"inf\") # this will stop deepspeed from logging @ stdout\\n if self.zero3_init_flag is None:\\n- self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"\\n+ self.zero3_init_flag = (\\n+ strtobool(os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", str(self.hf_ds_config.is_zero3()))) == 1\\n+ )\\n if self.zero3_init_flag and not self.hf_ds_config.is_zero3():\\n warnings.warn(\"DeepSpeed Zero3 Init flag is only applicable for ZeRO Stage 3. Setting it to False.\")\\n self.zero3_init_flag = False\\n \\n- def fill_match(self, ds_key_long, mismatches, must_match=True, **kwargs):\\n+ def fill_match(self, ds_key_long, mismatches=None, must_match=True, **kwargs):\\n+ mismatches = [] if mismatches is None else mismatches\\n config, ds_key = self.hf_ds_config.find_config_node(ds_key_long)\\n if config is None:\\n return\\n@@ -503,10 +518,28 @@ def deepspeed_config_process(self, prefix=\"\", mismatches=None, config=None, must\\n \\n def set_mixed_precision(self, mixed_precision):\\n ds_config = self.deepspeed_config\\n- if mixed_precision == \"fp16\" and \"fp16\" not in ds_config and \"bf16\" not in ds_config:\\n- ds_config.update({\"fp16\": {\"enabled\": True, \"auto_cast\": True}})\\n- elif mixed_precision == \"bf16\" and \"fp16\" not in ds_config and \"bf16\" not in ds_config:\\n- ds_config.update({\"bf16\": {\"enabled\": True}})\\n+ kwargs = {\\n+ \"fp16.enabled\": mixed_precision == \"fp16\",\\n+ \"bf16.enabled\": mixed_precision == \"bf16\",\\n+ }\\n+ if mixed_precision == \"fp16\":\\n+ if \"fp16\" not in ds_config:\\n+ ds_config[\"fp16\"] = {\"enabled\": True, \"auto_cast\": True}\\n+ elif mixed_precision == \"bf16\":\\n+ if \"bf16\" not in ds_config:\\n+ ds_config[\"bf16\"] = {\"enabled\": True}\\n+\\n+ if mixed_precision != \"no\":\\n+ diff_dtype = \"bf16\" if mixed_precision == \"fp16\" else \"fp16\"\\n+ if str(ds_config.get(diff_dtype, {}).get(\"enabled\", \"False\")).lower() == \"true\":\\n+ raise ValueError(\\n+ f\"`--mixed_precision` arg cannot be set to `{mixed_precision}` when `{diff_dtype}` is set in the DeepSpeed config file.\"\\n+ )\\n+ for dtype in [\"fp16\", \"bf16\"]:\\n+ if dtype not in ds_config:\\n+ ds_config[dtype] = {\"enabled\": False}\\n+ self.fill_match(\"fp16.enabled\", must_match=False, **kwargs)\\n+ self.fill_match(\"bf16.enabled\", must_match=False, **kwargs)\\n \\n def set_deepspeed_weakref(self):\\n from .imports import is_transformers_available\\n@@ -549,6 +582,31 @@ def zero3_init_context_manager(self, enable=False):\\n self.dschf = None\\n self.set_deepspeed_weakref()\\n \\n+ def _deepspeed_config_checks(self):\\n+ env_variable_names_to_ignore = [\\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\\n+ 
\"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\\n+ \"ACCELERATE_MIXED_PRECISION\",\\n+ ]\\n+ env_variable_names_to_ignore = [\\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower() for name in env_variable_names_to_ignore\\n+ ]\\n+\\n+ deepspeed_fields_from_accelerate_config = os.environ.get(\"ACCELERATE_CONFIG_DS_FIELDS\", \"\").split(\",\")\\n+\\n+ if any(name in env_variable_names_to_ignore for name in deepspeed_fields_from_accelerate_config):\\n+ raise ValueError(\\n+ f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\\\n\"\\n+ \"Please specify them appropriately in the DeepSpeed config file.\\\\n\"\\n+ \"If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\\\\n\"\\n+ \"The easiest method is to create a new config following the questionnaire via `accelerate config`.\\\\n\"\\n+ \"It will only ask for the necessary config variables when using `deepspeed_config_file`.\"\\n+ )\\n+\\n \\n @dataclass\\n class FullyShardedDataParallelPlugin:\\ndiff --git a/src/accelerate/utils/deepspeed.py b/src/accelerate/utils/deepspeed.py\\nindex 02d1ab8bc..69dc5c7f8 100644\\n--- a/src/accelerate/utils/deepspeed.py\\n+++ b/src/accelerate/utils/deepspeed.py\\n@@ -50,6 +50,9 @@ def __init__(self, config_file_or_dict):\\n raise ValueError(\"expecting either a path to a DeepSpeed config file or a pre-populated dict\")\\n self.config = config\\n \\n+ self.set_stage_and_offload()\\n+\\n+ def set_stage_and_offload(self):\\n # zero stage - this is done as early as possible, before model is created, to allow\\n # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object\\n # during ``zero.Init()`` which needs to know the dtype, and some other hparams.\\ndiff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py\\nindex eca75c1ed..fceb0bd23 100644\\n--- a/tests/deepspeed/test_deepspeed.py\\n+++ b/tests/deepspeed/test_deepspeed.py\\n@@ -285,8 +285,6 @@ def test_prepare_deepspeed(self, optim_type, scheduler_type):\\n from deepspeed.runtime.engine import DeepSpeedEngine\\n \\n kwargs = {\\n- \"fp16.enabled\": True,\\n- \"bf16.enabled\": False,\\n \"optimizer.params.lr\": 5e-5,\\n \"optimizer.params.weight_decay\": 0.0,\\n \"scheduler.params.warmup_min_lr\": 0.0,\\n@@ -370,7 +368,7 @@ def test_prepare_deepspeed(self, optim_type, scheduler_type):\\n # Test DeepSpeed optimizer + DeepSpeed scheduler\\n deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[ZERO2])\\n with mockenv_context(**self.dist_env):\\n- accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)\\n+ accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=\"fp16\")\\n train_set = RegressionDataset(length=80)\\n eval_set = RegressionDataset(length=20)\\n train_dataloader = DataLoader(train_set, batch_size=10, shuffle=True)\\n@@ -430,7 +428,7 @@ def test_prepare_deepspeed(self, optim_type, scheduler_type):\\n # Test custom optimizer + DeepSpeed scheduler\\n deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[ZERO2])\\n with mockenv_context(**self.dist_env):\\n- accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)\\n+ accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=\"fp16\")\\n train_set = RegressionDataset(length=80)\\n eval_set = RegressionDataset(length=20)\\n train_dataloader = DataLoader(train_set, batch_size=10, shuffle=True)\\n@@ -463,7 +461,7 @@ 
def test_prepare_deepspeed(self, optim_type, scheduler_type):\\n # Test deepspeed optimizer + custom scheduler\\n deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[ZERO2])\\n with mockenv_context(**self.dist_env):\\n- accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)\\n+ accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=\"fp16\")\\n train_set = RegressionDataset(length=80)\\n eval_set = RegressionDataset(length=20)\\n train_dataloader = DataLoader(train_set, batch_size=10, shuffle=True)\\n@@ -501,8 +499,6 @@ def test_save_checkpoints(self):\\n )\\n del deepspeed_plugin.deepspeed_config[\"bf16\"]\\n kwargs = {\\n- \"fp16.enabled\": True,\\n- \"bf16.enabled\": False,\\n \"optimizer.params.lr\": 5e-5,\\n \"optimizer.params.weight_decay\": 0.0,\\n \"scheduler.params.warmup_min_lr\": 0.0,\\n@@ -518,7 +514,7 @@ def test_save_checkpoints(self):\\n }\\n \\n with mockenv_context(**self.dist_env):\\n- accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)\\n+ accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=\"fp16\")\\n kwargs[\"train_batch_size\"] = (\\n kwargs[\"train_micro_batch_size_per_gpu\"]\\n * deepspeed_plugin.deepspeed_config[\"gradient_accumulation_steps\"]\\n@@ -594,6 +590,81 @@ def test_autofill_dsconfig(self):\\n accelerator.deepspeed_config[\"zero_optimization\"][\"stage3_gather_16bit_weights_on_model_save\"]\\n )\\n \\n+ @parameterized.expand([FP16, BF16], name_func=parameterized_custom_name_func)\\n+ def test_autofill_dsconfig_from_ds_plugin(self, dtype):\\n+ ds_config = self.ds_config_dict[\"zero3\"]\\n+ if dtype == BF16:\\n+ del ds_config[\"fp16\"]\\n+ else:\\n+ del ds_config[\"bf16\"]\\n+ ds_config[dtype][\"enabled\"] = \"auto\"\\n+ ds_config[\"zero_optimization\"][\"stage\"] = \"auto\"\\n+ ds_config[\"zero_optimization\"][\"stage3_gather_16bit_weights_on_model_save\"] = \"auto\"\\n+ ds_config[\"zero_optimization\"][\"offload_optimizer\"][\"device\"] = \"auto\"\\n+ ds_config[\"zero_optimization\"][\"offload_param\"][\"device\"] = \"auto\"\\n+ ds_config[\"gradient_accumulation_steps\"] = \"auto\"\\n+ ds_config[\"gradient_clipping\"] = \"auto\"\\n+\\n+ deepspeed_plugin = DeepSpeedPlugin(\\n+ hf_ds_config=ds_config,\\n+ zero3_init_flag=True,\\n+ gradient_accumulation_steps=1,\\n+ gradient_clipping=1.0,\\n+ zero_stage=2,\\n+ offload_optimizer_device=\"cpu\",\\n+ offload_param_device=\"cpu\",\\n+ zero3_save_16bit_model=True,\\n+ )\\n+\\n+ with mockenv_context(**self.dist_env):\\n+ accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=dtype)\\n+ deepspeed_plugin = accelerator.state.deepspeed_plugin\\n+ self.assertEqual(deepspeed_plugin.deepspeed_config[\"gradient_clipping\"], 1.0)\\n+ self.assertEqual(deepspeed_plugin.deepspeed_config[\"gradient_accumulation_steps\"], 1)\\n+ self.assertEqual(deepspeed_plugin.deepspeed_config[\"zero_optimization\"][\"stage\"], 2)\\n+ self.assertEqual(\\n+ deepspeed_plugin.deepspeed_config[\"zero_optimization\"][\"offload_optimizer\"][\"device\"], \"cpu\"\\n+ )\\n+ self.assertEqual(deepspeed_plugin.deepspeed_config[\"zero_optimization\"][\"offload_param\"][\"device\"], \"cpu\")\\n+ self.assertTrue(\\n+ deepspeed_plugin.deepspeed_config[\"zero_optimization\"][\"stage3_gather_16bit_weights_on_model_save\"]\\n+ )\\n+ self.assertTrue(deepspeed_plugin.deepspeed_config[dtype][\"enabled\"])\\n+\\n+ AcceleratorState._reset_state()\\n+ diff_dtype = \"bf16\" if dtype == \"fp16\" else \"fp16\"\\n+ with mockenv_context(**self.dist_env):\\n+ with 
self.assertRaises(ValueError) as cm:\\n+ accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=diff_dtype)\\n+ self.assertTrue(\\n+ f\"`--mixed_precision` arg cannot be set to `{diff_dtype}` when `{dtype}` is set in the DeepSpeed config file.\"\\n+ in str(cm.exception)\\n+ )\\n+\\n+ def test_ds_config_assertions(self):\\n+ ambiguous_env = self.dist_env.copy()\\n+ ambiguous_env[\\n+ \"ACCELERATE_CONFIG_DS_FIELDS\"\\n+ ] = \"gradient_accumulation_steps,gradient_clipping,zero_stage,offload_optimizer_device,offload_param_device,zero3_save_16bit_model,mixed_precision\"\\n+\\n+ with mockenv_context(**ambiguous_env):\\n+ with self.assertRaises(ValueError) as cm:\\n+ deepspeed_plugin = DeepSpeedPlugin(\\n+ hf_ds_config=self.ds_config_file[ZERO3],\\n+ zero3_init_flag=True,\\n+ gradient_accumulation_steps=1,\\n+ gradient_clipping=1.0,\\n+ zero_stage=ZERO2,\\n+ offload_optimizer_device=\"cpu\",\\n+ offload_param_device=\"cpu\",\\n+ zero3_save_16bit_model=True,\\n+ )\\n+ _ = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=FP16)\\n+ self.assertTrue(\\n+ \"If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\"\\n+ in str(cm.exception)\\n+ )\\n+\\n def test_basic_run(self):\\n mod_file = inspect.getfile(accelerate.test_utils)\\n test_file_path = os.path.sep.join(\\n',\n", - " 'code_comments': [{'body': 'Nit: all `none`->`None`',\n", - " 'diff_hunk': '@@ -301,45 +301,50 @@ def launch_command_parser(subparsers=None):\\n \"--zero_stage\",\\n default=None,\\n type=int,\\n- help=\"DeepSpeed\\'s ZeRO optimization stage (useful only when `use_deepspeed` flag is passed).\",\\n+ help=\"DeepSpeed\\'s ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). \"\\n+ \"If unspecified, will default to `2`.\",\\n )\\n deepspeed_args.add_argument(\\n \"--offload_optimizer_device\",\\n default=None,\\n type=str,\\n- help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed).\",\\n+ help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). 
\"\\n+ \"If unspecified, will default to `none`.\",',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\n \"If you are using an accelerate config file, set `mixed_precision=no` \"\\r\\n```',\n", - " 'diff_hunk': '@@ -549,6 +551,40 @@ def zero3_init_context_manager(self, enable=False):\\n self.dschf = None\\n self.set_deepspeed_weakref()\\n \\n+ def _deepspeed_config_checks(self):\\n+ env_variable_names_to_ignore = [\\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\\n+ \"ACCELERATE_MIXED_PRECISION\",\\n+ ]\\n+ duplicate_values_flag = False\\n+ for name in env_variable_names_to_ignore:\\n+ if name != \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, None) is not None:\\n+ duplicate_values_flag = True\\n+ elif name == \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, \"no\") != \"no\":\\n+ duplicate_values_flag = True\\n+ if duplicate_values_flag:\\n+ break\\n+\\n+ if duplicate_values_flag:\\n+ env_variable_names_to_ignore = [\\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower()\\n+ for name in env_variable_names_to_ignore\\n+ ]\\n+ raise ValueError(\\n+ f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\\\n\"\\n+ \"Please specify them appropriately in the DeepSpeed config file.\\\\n\"\\n+ \"If you are using accelerate config file, set `mixed_precision=no` \"',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\n \"and make sure to not specify these config variables in `accelerate launch` command. \\\\n\"\\r\\n```',\n", - " 'diff_hunk': '@@ -549,6 +551,40 @@ def zero3_init_context_manager(self, enable=False):\\n self.dschf = None\\n self.set_deepspeed_weakref()\\n \\n+ def _deepspeed_config_checks(self):\\n+ env_variable_names_to_ignore = [\\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\\n+ \"ACCELERATE_MIXED_PRECISION\",\\n+ ]\\n+ duplicate_values_flag = False\\n+ for name in env_variable_names_to_ignore:\\n+ if name != \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, None) is not None:\\n+ duplicate_values_flag = True\\n+ elif name == \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, \"no\") != \"no\":\\n+ duplicate_values_flag = True\\n+ if duplicate_values_flag:\\n+ break\\n+\\n+ if duplicate_values_flag:\\n+ env_variable_names_to_ignore = [\\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower()\\n+ for name in env_variable_names_to_ignore\\n+ ]\\n+ raise ValueError(\\n+ f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\\\n\"\\n+ \"Please specify them appropriately in the DeepSpeed config file.\\\\n\"\\n+ \"If you are using accelerate config file, set `mixed_precision=no` \"\\n+ \"and remove others config variables mentioned in the above specified list; \"\\n+ \"else don\\'t specify these config variables in `accelerate launch` command. 
\\\\n\"',\n", - " 'from_author': False},\n", - " {'body': 'Hello, `none` is the string option as possible values are `cpu`|`nvme`l`none`',\n", - " 'diff_hunk': '@@ -301,45 +301,50 @@ def launch_command_parser(subparsers=None):\\n \"--zero_stage\",\\n default=None,\\n type=int,\\n- help=\"DeepSpeed\\'s ZeRO optimization stage (useful only when `use_deepspeed` flag is passed).\",\\n+ help=\"DeepSpeed\\'s ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). \"\\n+ \"If unspecified, will default to `2`.\",\\n )\\n deepspeed_args.add_argument(\\n \"--offload_optimizer_device\",\\n default=None,\\n type=str,\\n- help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed).\",\\n+ help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). \"\\n+ \"If unspecified, will default to `none`.\",',\n", - " 'from_author': True},\n", - " {'body': 'Oh then add quotes?',\n", - " 'diff_hunk': '@@ -301,45 +301,50 @@ def launch_command_parser(subparsers=None):\\n \"--zero_stage\",\\n default=None,\\n type=int,\\n- help=\"DeepSpeed\\'s ZeRO optimization stage (useful only when `use_deepspeed` flag is passed).\",\\n+ help=\"DeepSpeed\\'s ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). \"\\n+ \"If unspecified, will default to `2`.\",\\n )\\n deepspeed_args.add_argument(\\n \"--offload_optimizer_device\",\\n default=None,\\n type=str,\\n- help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed).\",\\n+ help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). \"\\n+ \"If unspecified, will default to `none`.\",',\n", - " 'from_author': False},\n", - " {'body': 'Done.',\n", - " 'diff_hunk': '@@ -301,45 +301,50 @@ def launch_command_parser(subparsers=None):\\n \"--zero_stage\",\\n default=None,\\n type=int,\\n- help=\"DeepSpeed\\'s ZeRO optimization stage (useful only when `use_deepspeed` flag is passed).\",\\n+ help=\"DeepSpeed\\'s ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). \"\\n+ \"If unspecified, will default to `2`.\",\\n )\\n deepspeed_args.add_argument(\\n \"--offload_optimizer_device\",\\n default=None,\\n type=str,\\n- help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed).\",\\n+ help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). \"\\n+ \"If unspecified, will default to `none`.\",',\n", - " 'from_author': True},\n", - " {'body': \"Did I understand it right that here you suggest to the user to set `mixed_precision=no` even if they use mixed precision in ds_config.json? If so this now is worse than before, as now you're proposing to force 2 opposite values in the same config entry. As in `mixed_precision=no` and `fp16 { enabled: true}`.\\r\\n\\r\\nUnless I'm misunderstanding the wording that is. 
\\r\\n\\r\\nI think it should be either `mixed_precision=xyz` or `ds_config.json` (with fp16 or bf16 blocks or no block at all as fp16 is the default), but not both.\",\n", - " 'diff_hunk': '@@ -549,6 +551,40 @@ def zero3_init_context_manager(self, enable=False):\\n self.dschf = None\\n self.set_deepspeed_weakref()\\n \\n+ def _deepspeed_config_checks(self):\\n+ env_variable_names_to_ignore = [\\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\\n+ \"ACCELERATE_MIXED_PRECISION\",\\n+ ]\\n+ duplicate_values_flag = False\\n+ for name in env_variable_names_to_ignore:\\n+ if name != \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, None) is not None:\\n+ duplicate_values_flag = True\\n+ elif name == \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, \"no\") != \"no\":\\n+ duplicate_values_flag = True\\n+ if duplicate_values_flag:\\n+ break\\n+\\n+ if duplicate_values_flag:\\n+ env_variable_names_to_ignore = [\\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower()\\n+ for name in env_variable_names_to_ignore\\n+ ]\\n+ raise ValueError(\\n+ f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\\\n\"\\n+ \"Please specify them appropriately in the DeepSpeed config file.\\\\n\"\\n+ \"If you are using an accelerate config file, set `mixed_precision=no` \"',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\n \"The easiest method is to create a new config following the questionnaire via `accelerate config`.\\\\n\"\\r\\n```',\n", - " 'diff_hunk': '@@ -578,10 +578,9 @@ def _deepspeed_config_checks(self):\\n raise ValueError(\\n f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\\\n\"\\n \"Please specify them appropriately in the DeepSpeed config file.\\\\n\"\\n- \"If you are using an accelerate config file, set `mixed_precision=no` \"\\n- \"and remove others config variables mentioned in the above specified list; \"\\n+ \"If you are using an accelerate config file, remove others config variables mentioned in the above specified list; \"\\n \"and make sure to not specify these config variables in `accelerate launch` command. \\\\n\"\\n- \"The easiest method is to create new config following the questionnaire via `accelerate config`.\\\\n\"\\n+ \"The easiest method is to create new config following the questionnaire via `accelerate config`.\\\\n\"',\n", - " 'from_author': False},\n", - " {'body': \"Done. 
I've simplified the error message wherein user can just remove all the ambiguous entries from accelerate config file and not specify them in launch command\",\n", - " 'diff_hunk': '@@ -549,6 +551,40 @@ def zero3_init_context_manager(self, enable=False):\\n self.dschf = None\\n self.set_deepspeed_weakref()\\n \\n+ def _deepspeed_config_checks(self):\\n+ env_variable_names_to_ignore = [\\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\\n+ \"ACCELERATE_MIXED_PRECISION\",\\n+ ]\\n+ duplicate_values_flag = False\\n+ for name in env_variable_names_to_ignore:\\n+ if name != \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, None) is not None:\\n+ duplicate_values_flag = True\\n+ elif name == \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, \"no\") != \"no\":\\n+ duplicate_values_flag = True\\n+ if duplicate_values_flag:\\n+ break\\n+\\n+ if duplicate_values_flag:\\n+ env_variable_names_to_ignore = [\\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower()\\n+ for name in env_variable_names_to_ignore\\n+ ]\\n+ raise ValueError(\\n+ f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\\\n\"\\n+ \"Please specify them appropriately in the DeepSpeed config file.\\\\n\"\\n+ \"If you are using an accelerate config file, set `mixed_precision=no` \"',\n", - " 'from_author': True},\n", - " {'body': 'to focus we are dealing with `mixed_precision` duplicity here:\\r\\n\\r\\nI\\'ve just tried:\\r\\n\\r\\n```\\r\\ndeepspeed_config:\\r\\n deepspeed_multinode_launcher: standard\\r\\n deepspeed_config_file: ./configs/vopt-large-z3/ds_config.json\\r\\n zero3_init_flag: true\\r\\ndistributed_type: DEEPSPEED\\r\\nmixed_precision: \\'no\\'\\r\\n```\\r\\n\\r\\n```\\r\\n{\\r\\n \"fp16\": {\\r\\n \"enabled\": true,\\r\\n [...]\\r\\n```\\r\\nand it doesn\\'t assert.\\r\\n\\r\\nwith the latest commit of `dd2d57b6a1`\\r\\n\\r\\n',\n", - " 'diff_hunk': '@@ -549,6 +551,40 @@ def zero3_init_context_manager(self, enable=False):\\n self.dschf = None\\n self.set_deepspeed_weakref()\\n \\n+ def _deepspeed_config_checks(self):\\n+ env_variable_names_to_ignore = [\\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\\n+ \"ACCELERATE_MIXED_PRECISION\",\\n+ ]\\n+ duplicate_values_flag = False\\n+ for name in env_variable_names_to_ignore:\\n+ if name != \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, None) is not None:\\n+ duplicate_values_flag = True\\n+ elif name == \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, \"no\") != \"no\":\\n+ duplicate_values_flag = True\\n+ if duplicate_values_flag:\\n+ break\\n+\\n+ if duplicate_values_flag:\\n+ env_variable_names_to_ignore = [\\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower()\\n+ for name in env_variable_names_to_ignore\\n+ ]\\n+ raise ValueError(\\n+ f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\\\n\"\\n+ \"Please specify them appropriately in the DeepSpeed config file.\\\\n\"\\n+ \"If you 
are using an accelerate config file, set `mixed_precision=no` \"',\n", - " 'from_author': False},\n", - " {'body': \"(incidentally `mixed_precision: fp16|bf16|'no'` is odd - why do different values don't follow the same style - i.e 'no' in quotes but not the other values)\",\n", - " 'diff_hunk': '@@ -549,6 +551,40 @@ def zero3_init_context_manager(self, enable=False):\\n self.dschf = None\\n self.set_deepspeed_weakref()\\n \\n+ def _deepspeed_config_checks(self):\\n+ env_variable_names_to_ignore = [\\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\\n+ \"ACCELERATE_MIXED_PRECISION\",\\n+ ]\\n+ duplicate_values_flag = False\\n+ for name in env_variable_names_to_ignore:\\n+ if name != \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, None) is not None:\\n+ duplicate_values_flag = True\\n+ elif name == \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, \"no\") != \"no\":\\n+ duplicate_values_flag = True\\n+ if duplicate_values_flag:\\n+ break\\n+\\n+ if duplicate_values_flag:\\n+ env_variable_names_to_ignore = [\\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower()\\n+ for name in env_variable_names_to_ignore\\n+ ]\\n+ raise ValueError(\\n+ f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\\\n\"\\n+ \"Please specify them appropriately in the DeepSpeed config file.\\\\n\"\\n+ \"If you are using an accelerate config file, set `mixed_precision=no` \"',\n", - " 'from_author': False},\n", - " {'body': \"Hello @stas, this case can't be helped as 'no' is the default value and if one doesn't specify the entry in accelerate config file, it will get default value of 'no' and thereby we have no way to check if user has given the default in config file or was it set to default when reading the config file. It would mean a lot of code rewriting as `mixed_precision` is used in various other non DeepSpeed parts.\\n\\nThe default value of `mixed_precision` would be overriden by that in `deepspeed_config_file`. Now, if you answer the questionnaire via `accelerate config`, the entry of `mixed_precision` won't be there when using `deepspeed_config_file`. \\n\\n\\n@sgugger, what are your thoughts around this. 
\",\n", - " 'diff_hunk': '@@ -549,6 +551,40 @@ def zero3_init_context_manager(self, enable=False):\\n self.dschf = None\\n self.set_deepspeed_weakref()\\n \\n+ def _deepspeed_config_checks(self):\\n+ env_variable_names_to_ignore = [\\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\\n+ \"ACCELERATE_MIXED_PRECISION\",\\n+ ]\\n+ duplicate_values_flag = False\\n+ for name in env_variable_names_to_ignore:\\n+ if name != \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, None) is not None:\\n+ duplicate_values_flag = True\\n+ elif name == \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, \"no\") != \"no\":\\n+ duplicate_values_flag = True\\n+ if duplicate_values_flag:\\n+ break\\n+\\n+ if duplicate_values_flag:\\n+ env_variable_names_to_ignore = [\\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower()\\n+ for name in env_variable_names_to_ignore\\n+ ]\\n+ raise ValueError(\\n+ f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\\\n\"\\n+ \"Please specify them appropriately in the DeepSpeed config file.\\\\n\"\\n+ \"If you are using an accelerate config file, set `mixed_precision=no` \"',\n", - " 'from_author': True},\n", - " {'body': \"understood! in which case I think this should work:\\r\\n\\r\\n1. if the `mixed_precision` config isn't in the accelerate config file all is good\\r\\n2. if the `mixed_precision` config is in the accelerate config file and ds_config file is used, the value of the former must match the value of the latter or assert.\",\n", - " 'diff_hunk': '@@ -549,6 +551,40 @@ def zero3_init_context_manager(self, enable=False):\\n self.dschf = None\\n self.set_deepspeed_weakref()\\n \\n+ def _deepspeed_config_checks(self):\\n+ env_variable_names_to_ignore = [\\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\\n+ \"ACCELERATE_MIXED_PRECISION\",\\n+ ]\\n+ duplicate_values_flag = False\\n+ for name in env_variable_names_to_ignore:\\n+ if name != \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, None) is not None:\\n+ duplicate_values_flag = True\\n+ elif name == \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, \"no\") != \"no\":\\n+ duplicate_values_flag = True\\n+ if duplicate_values_flag:\\n+ break\\n+\\n+ if duplicate_values_flag:\\n+ env_variable_names_to_ignore = [\\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower()\\n+ for name in env_variable_names_to_ignore\\n+ ]\\n+ raise ValueError(\\n+ f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\\\n\"\\n+ \"Please specify them appropriately in the DeepSpeed config file.\\\\n\"\\n+ \"If you are using an accelerate config file, set `mixed_precision=no` \"',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\nBelow is a sample script using `deepspeed_config_file` in different scenarios.\\r\\n```',\n", - " 'diff_hunk': '@@ -395,6 +395,194 @@ We will look at the changes needed in the code when using these.\\n based on model, 
dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. \\n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\\n \\n+**Things to note when using DeepSpeed Config File**\\n+\\n+Below is a smaple script using `deepspeed_config_file` in different scenarios.',\n", - " 'from_author': False},\n", - " {'body': 'Are the lines of `-` intended? Should there be a new line before the text?',\n", - " 'diff_hunk': '@@ -395,6 +395,194 @@ We will look at the changes needed in the code when using these.\\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. \\n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\\n \\n+**Things to note when using DeepSpeed Config File**\\n+\\n+Below is a smaple script using `deepspeed_config_file` in different scenarios.\\n+\\n+Code `test.py`:\\n+\\n+```python\\n+from accelerate import Accelerator\\n+from accelerate.state import AcceleratorState\\n+\\n+\\n+def main():\\n+ accelerator = Accelerator()\\n+ accelerator.print(f\"{AcceleratorState()}\")\\n+\\n+\\n+if __name__ == \"__main__\":\\n+ main()\\n+```\\n+\\n+**Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\\n+\\n+1. `accelerate config`:\\n+\\n+```yaml\\n+command_file: null\\n+commands: null\\n+compute_environment: LOCAL_MACHINE\\n+deepspeed_config:\\n+ gradient_accumulation_steps: 1\\n+ gradient_clipping: 1.0\\n+ offload_optimizer_device: \\'cpu\\'\\n+ offload_param_device: \\'cpu\\'\\n+ zero3_init_flag: true\\n+ zero3_save_16bit_model: true\\n+ zero_stage: 3\\n+ deepspeed_config_file: \\'ds_config.json\\'\\n+distributed_type: DEEPSPEED\\n+downcast_bf16: \\'no\\'\\n+dynamo_backend: \\'NO\\'\\n+fsdp_config: {}\\n+gpu_ids: null\\n+machine_rank: 0\\n+main_process_ip: null\\n+main_process_port: null\\n+main_training_function: main\\n+megatron_lm_config: {}\\n+num_machines: 1\\n+num_processes: 2\\n+rdzv_backend: static\\n+same_network: true\\n+tpu_name: null\\n+tpu_zone: null\\n+use_cpu: false\\n+```\\n+\\n+2. `ds_config.json`:\\n+\\n+```json\\n+{\\n+ \"bf16\": {\\n+ \"enabled\": true\\n+ },\\n+ \"zero_optimization\": {\\n+ \"stage\": 3,\\n+ \"stage3_gather_16bit_weights_on_model_save\": false,\\n+ \"offload_optimizer\": {\\n+ \"device\": \"none\"\\n+ },\\n+ \"offload_param\": {\\n+ \"device\": \"none\"\\n+ }\\n+ },\\n+ \"gradient_clipping\": 1.0,\\n+ \"train_batch_size\": \"auto\",\\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\\n+ \"gradient_accumulation_steps\": 10,\\n+ \"steps_per_print\": 2000000\\n+}\\n+```\\n+\\n+3. 
Output of `accelerate launch test.py`:\\n+\\n+```bash\\n+ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored: \\n+[\\'gradient_accumulation_steps\\', \\'gradient_clipping\\', \\'zero_stage\\', \\'offload_optimizer_device\\', \\'offload_param_device\\', \\n+\\'zero3_save_16bit_model\\', \\'mixed_precision\\'].\\n+Please specify them appropriately in the DeepSpeed config file.\\n+If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\\n+The easiest method is to create a new config following the questionnaire via `accelerate config`.\\n+It will only ask for the necessary config variables when using `deepspeed_config_file`.\\n+```\\n+\\n+**Scenario 2**: Use the solution of the error to create new accelerate config and check that no ambiguity error is now thrown.\\n+\\n+1. Run `accelerate config`:\\n+\\n+```bash\\n+$ accelerate config\\n+-------------------------------------------------------------------------------------------------------------------------------In which compute environment are you running?\\n+This machine \\n+-------------------------------------------------------------------------------------------------------------------------------Which type of machine are you using? ',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\n1. Content of the `accelerate` config:\\r\\n```\\r\\n(to not mistake this with the command `accelerate config`',\n", - " 'diff_hunk': '@@ -395,6 +395,194 @@ We will look at the changes needed in the code when using these.\\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. \\n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\\n \\n+**Things to note when using DeepSpeed Config File**\\n+\\n+Below is a smaple script using `deepspeed_config_file` in different scenarios.\\n+\\n+Code `test.py`:\\n+\\n+```python\\n+from accelerate import Accelerator\\n+from accelerate.state import AcceleratorState\\n+\\n+\\n+def main():\\n+ accelerator = Accelerator()\\n+ accelerator.print(f\"{AcceleratorState()}\")\\n+\\n+\\n+if __name__ == \"__main__\":\\n+ main()\\n+```\\n+\\n+**Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\\n+\\n+1. `accelerate config`:',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\n2. Content of the `accelerate` config:\\r\\n```',\n", - " 'diff_hunk': '@@ -395,6 +395,194 @@ We will look at the changes needed in the code when using these.\\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. \\n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\\n \\n+**Things to note when using DeepSpeed Config File**\\n+\\n+Below is a smaple script using `deepspeed_config_file` in different scenarios.\\n+\\n+Code `test.py`:\\n+\\n+```python\\n+from accelerate import Accelerator\\n+from accelerate.state import AcceleratorState\\n+\\n+\\n+def main():\\n+ accelerator = Accelerator()\\n+ accelerator.print(f\"{AcceleratorState()}\")\\n+\\n+\\n+if __name__ == \"__main__\":\\n+ main()\\n+```\\n+\\n+**Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\\n+\\n+1. 
`accelerate config`:\\n+\\n+```yaml\\n+command_file: null\\n+commands: null\\n+compute_environment: LOCAL_MACHINE\\n+deepspeed_config:\\n+ gradient_accumulation_steps: 1\\n+ gradient_clipping: 1.0\\n+ offload_optimizer_device: \\'cpu\\'\\n+ offload_param_device: \\'cpu\\'\\n+ zero3_init_flag: true\\n+ zero3_save_16bit_model: true\\n+ zero_stage: 3\\n+ deepspeed_config_file: \\'ds_config.json\\'\\n+distributed_type: DEEPSPEED\\n+downcast_bf16: \\'no\\'\\n+dynamo_backend: \\'NO\\'\\n+fsdp_config: {}\\n+gpu_ids: null\\n+machine_rank: 0\\n+main_process_ip: null\\n+main_process_port: null\\n+main_training_function: main\\n+megatron_lm_config: {}\\n+num_machines: 1\\n+num_processes: 2\\n+rdzv_backend: static\\n+same_network: true\\n+tpu_name: null\\n+tpu_zone: null\\n+use_cpu: false\\n+```\\n+\\n+2. `ds_config.json`:\\n+\\n+```json\\n+{\\n+ \"bf16\": {\\n+ \"enabled\": true\\n+ },\\n+ \"zero_optimization\": {\\n+ \"stage\": 3,\\n+ \"stage3_gather_16bit_weights_on_model_save\": false,\\n+ \"offload_optimizer\": {\\n+ \"device\": \"none\"\\n+ },\\n+ \"offload_param\": {\\n+ \"device\": \"none\"\\n+ }\\n+ },\\n+ \"gradient_clipping\": 1.0,\\n+ \"train_batch_size\": \"auto\",\\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\\n+ \"gradient_accumulation_steps\": 10,\\n+ \"steps_per_print\": 2000000\\n+}\\n+```\\n+\\n+3. Output of `accelerate launch test.py`:\\n+\\n+```bash\\n+ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored: \\n+[\\'gradient_accumulation_steps\\', \\'gradient_clipping\\', \\'zero_stage\\', \\'offload_optimizer_device\\', \\'offload_param_device\\', \\n+\\'zero3_save_16bit_model\\', \\'mixed_precision\\'].\\n+Please specify them appropriately in the DeepSpeed config file.\\n+If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\\n+The easiest method is to create a new config following the questionnaire via `accelerate config`.\\n+It will only ask for the necessary config variables when using `deepspeed_config_file`.\\n+```\\n+\\n+**Scenario 2**: Use the solution of the error to create new accelerate config and check that no ambiguity error is now thrown.\\n+\\n+1. Run `accelerate config`:\\n+\\n+```bash\\n+$ accelerate config\\n+-------------------------------------------------------------------------------------------------------------------------------In which compute environment are you running?\\n+This machine \\n+-------------------------------------------------------------------------------------------------------------------------------Which type of machine are you using? \\n+multi-GPU \\n+How many different machines will you use (use more than 1 for multi-node training)? [1]: \\n+Do you wish to optimize your script with torch dynamo?[yes/NO]: \\n+Do you want to use DeepSpeed? [yes/NO]: yes \\n+Do you want to specify a json file to a DeepSpeed config? [yes/NO]: yes \\n+Please enter the path to the json DeepSpeed config file: ds_config.json \\n+Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: yes\\n+How many GPU(s) should be used for distributed training? [1]:4\\n+accelerate configuration saved at ds_config_sample.yaml\\n+```\\n+\\n+2. 
`accelerate config`:',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\n**Scenario 3**: Setting the `accelerate launch` command arguments related to DeepSpeed as `\"auto\"` in the DeepSpeed` configuration file and check that things work as expected.\\r\\n```',\n", - " 'diff_hunk': '@@ -395,6 +395,194 @@ We will look at the changes needed in the code when using these.\\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. \\n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\\n \\n+**Things to note when using DeepSpeed Config File**\\n+\\n+Below is a smaple script using `deepspeed_config_file` in different scenarios.\\n+\\n+Code `test.py`:\\n+\\n+```python\\n+from accelerate import Accelerator\\n+from accelerate.state import AcceleratorState\\n+\\n+\\n+def main():\\n+ accelerator = Accelerator()\\n+ accelerator.print(f\"{AcceleratorState()}\")\\n+\\n+\\n+if __name__ == \"__main__\":\\n+ main()\\n+```\\n+\\n+**Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\\n+\\n+1. `accelerate config`:\\n+\\n+```yaml\\n+command_file: null\\n+commands: null\\n+compute_environment: LOCAL_MACHINE\\n+deepspeed_config:\\n+ gradient_accumulation_steps: 1\\n+ gradient_clipping: 1.0\\n+ offload_optimizer_device: \\'cpu\\'\\n+ offload_param_device: \\'cpu\\'\\n+ zero3_init_flag: true\\n+ zero3_save_16bit_model: true\\n+ zero_stage: 3\\n+ deepspeed_config_file: \\'ds_config.json\\'\\n+distributed_type: DEEPSPEED\\n+downcast_bf16: \\'no\\'\\n+dynamo_backend: \\'NO\\'\\n+fsdp_config: {}\\n+gpu_ids: null\\n+machine_rank: 0\\n+main_process_ip: null\\n+main_process_port: null\\n+main_training_function: main\\n+megatron_lm_config: {}\\n+num_machines: 1\\n+num_processes: 2\\n+rdzv_backend: static\\n+same_network: true\\n+tpu_name: null\\n+tpu_zone: null\\n+use_cpu: false\\n+```\\n+\\n+2. `ds_config.json`:\\n+\\n+```json\\n+{\\n+ \"bf16\": {\\n+ \"enabled\": true\\n+ },\\n+ \"zero_optimization\": {\\n+ \"stage\": 3,\\n+ \"stage3_gather_16bit_weights_on_model_save\": false,\\n+ \"offload_optimizer\": {\\n+ \"device\": \"none\"\\n+ },\\n+ \"offload_param\": {\\n+ \"device\": \"none\"\\n+ }\\n+ },\\n+ \"gradient_clipping\": 1.0,\\n+ \"train_batch_size\": \"auto\",\\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\\n+ \"gradient_accumulation_steps\": 10,\\n+ \"steps_per_print\": 2000000\\n+}\\n+```\\n+\\n+3. Output of `accelerate launch test.py`:\\n+\\n+```bash\\n+ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored: \\n+[\\'gradient_accumulation_steps\\', \\'gradient_clipping\\', \\'zero_stage\\', \\'offload_optimizer_device\\', \\'offload_param_device\\', \\n+\\'zero3_save_16bit_model\\', \\'mixed_precision\\'].\\n+Please specify them appropriately in the DeepSpeed config file.\\n+If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\\n+The easiest method is to create a new config following the questionnaire via `accelerate config`.\\n+It will only ask for the necessary config variables when using `deepspeed_config_file`.\\n+```\\n+\\n+**Scenario 2**: Use the solution of the error to create new accelerate config and check that no ambiguity error is now thrown.\\n+\\n+1. 
Run `accelerate config`:\\n+\\n+```bash\\n+$ accelerate config\\n+-------------------------------------------------------------------------------------------------------------------------------In which compute environment are you running?\\n+This machine \\n+-------------------------------------------------------------------------------------------------------------------------------Which type of machine are you using? \\n+multi-GPU \\n+How many different machines will you use (use more than 1 for multi-node training)? [1]: \\n+Do you wish to optimize your script with torch dynamo?[yes/NO]: \\n+Do you want to use DeepSpeed? [yes/NO]: yes \\n+Do you want to specify a json file to a DeepSpeed config? [yes/NO]: yes \\n+Please enter the path to the json DeepSpeed config file: ds_config.json \\n+Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: yes\\n+How many GPU(s) should be used for distributed training? [1]:4\\n+accelerate configuration saved at ds_config_sample.yaml\\n+```\\n+\\n+2. `accelerate config`:\\n+\\n+```yaml\\n+compute_environment: LOCAL_MACHINE\\n+deepspeed_config:\\n+ deepspeed_config_file: ds_config.json\\n+ zero3_init_flag: true\\n+distributed_type: DEEPSPEED\\n+downcast_bf16: \\'no\\'\\n+dynamo_backend: \\'NO\\'\\n+fsdp_config: {}\\n+machine_rank: 0\\n+main_training_function: main\\n+megatron_lm_config: {}\\n+num_machines: 1\\n+num_processes: 4\\n+rdzv_backend: static\\n+same_network: true\\n+use_cpu: false\\n+```\\n+\\n+3. Output of `accelerate launch test.py`:\\n+\\n+```bash\\n+Distributed environment: DEEPSPEED Backend: nccl\\n+Num processes: 4\\n+Process index: 0\\n+Local process index: 0\\n+Device: cuda:0\\n+Mixed precision type: bf16\\n+ds_config: {\\'bf16\\': {\\'enabled\\': True}, \\'zero_optimization\\': {\\'stage\\': 3, \\'stage3_gather_16bit_weights_on_model_save\\': False, \\'offload_optimizer\\': {\\'device\\': \\'none\\'}, \\'offload_param\\': {\\'device\\': \\'none\\'}}, \\'gradient_clipping\\': 1.0, \\'train_batch_size\\': \\'auto\\', \\'train_micro_batch_size_per_gpu\\': \\'auto\\', \\'gradient_accumulation_steps\\': 10, \\'steps_per_print\\': inf, \\'fp16\\': {\\'enabled\\': False}}\\n+```\\n+\\n+**Scenario 3**: Setting the `accelerate launch` cmd args related to deepspeed as `auto` in `deepspeed_config_file` and check that things work as expected.',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\n1. New `ds_config.json` with `\"auto\"` for the `accelerate launch` DeepSpeed command arguments:\\r\\n```',\n", - " 'diff_hunk': '@@ -395,6 +395,194 @@ We will look at the changes needed in the code when using these.\\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. \\n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\\n \\n+**Things to note when using DeepSpeed Config File**\\n+\\n+Below is a smaple script using `deepspeed_config_file` in different scenarios.\\n+\\n+Code `test.py`:\\n+\\n+```python\\n+from accelerate import Accelerator\\n+from accelerate.state import AcceleratorState\\n+\\n+\\n+def main():\\n+ accelerator = Accelerator()\\n+ accelerator.print(f\"{AcceleratorState()}\")\\n+\\n+\\n+if __name__ == \"__main__\":\\n+ main()\\n+```\\n+\\n+**Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\\n+\\n+1. 
`accelerate config`:\\n+\\n+```yaml\\n+command_file: null\\n+commands: null\\n+compute_environment: LOCAL_MACHINE\\n+deepspeed_config:\\n+ gradient_accumulation_steps: 1\\n+ gradient_clipping: 1.0\\n+ offload_optimizer_device: \\'cpu\\'\\n+ offload_param_device: \\'cpu\\'\\n+ zero3_init_flag: true\\n+ zero3_save_16bit_model: true\\n+ zero_stage: 3\\n+ deepspeed_config_file: \\'ds_config.json\\'\\n+distributed_type: DEEPSPEED\\n+downcast_bf16: \\'no\\'\\n+dynamo_backend: \\'NO\\'\\n+fsdp_config: {}\\n+gpu_ids: null\\n+machine_rank: 0\\n+main_process_ip: null\\n+main_process_port: null\\n+main_training_function: main\\n+megatron_lm_config: {}\\n+num_machines: 1\\n+num_processes: 2\\n+rdzv_backend: static\\n+same_network: true\\n+tpu_name: null\\n+tpu_zone: null\\n+use_cpu: false\\n+```\\n+\\n+2. `ds_config.json`:\\n+\\n+```json\\n+{\\n+ \"bf16\": {\\n+ \"enabled\": true\\n+ },\\n+ \"zero_optimization\": {\\n+ \"stage\": 3,\\n+ \"stage3_gather_16bit_weights_on_model_save\": false,\\n+ \"offload_optimizer\": {\\n+ \"device\": \"none\"\\n+ },\\n+ \"offload_param\": {\\n+ \"device\": \"none\"\\n+ }\\n+ },\\n+ \"gradient_clipping\": 1.0,\\n+ \"train_batch_size\": \"auto\",\\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\\n+ \"gradient_accumulation_steps\": 10,\\n+ \"steps_per_print\": 2000000\\n+}\\n+```\\n+\\n+3. Output of `accelerate launch test.py`:\\n+\\n+```bash\\n+ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored: \\n+[\\'gradient_accumulation_steps\\', \\'gradient_clipping\\', \\'zero_stage\\', \\'offload_optimizer_device\\', \\'offload_param_device\\', \\n+\\'zero3_save_16bit_model\\', \\'mixed_precision\\'].\\n+Please specify them appropriately in the DeepSpeed config file.\\n+If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\\n+The easiest method is to create a new config following the questionnaire via `accelerate config`.\\n+It will only ask for the necessary config variables when using `deepspeed_config_file`.\\n+```\\n+\\n+**Scenario 2**: Use the solution of the error to create new accelerate config and check that no ambiguity error is now thrown.\\n+\\n+1. Run `accelerate config`:\\n+\\n+```bash\\n+$ accelerate config\\n+-------------------------------------------------------------------------------------------------------------------------------In which compute environment are you running?\\n+This machine \\n+-------------------------------------------------------------------------------------------------------------------------------Which type of machine are you using? \\n+multi-GPU \\n+How many different machines will you use (use more than 1 for multi-node training)? [1]: \\n+Do you wish to optimize your script with torch dynamo?[yes/NO]: \\n+Do you want to use DeepSpeed? [yes/NO]: yes \\n+Do you want to specify a json file to a DeepSpeed config? [yes/NO]: yes \\n+Please enter the path to the json DeepSpeed config file: ds_config.json \\n+Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: yes\\n+How many GPU(s) should be used for distributed training? [1]:4\\n+accelerate configuration saved at ds_config_sample.yaml\\n+```\\n+\\n+2. 
`accelerate config`:\\n+\\n+```yaml\\n+compute_environment: LOCAL_MACHINE\\n+deepspeed_config:\\n+ deepspeed_config_file: ds_config.json\\n+ zero3_init_flag: true\\n+distributed_type: DEEPSPEED\\n+downcast_bf16: \\'no\\'\\n+dynamo_backend: \\'NO\\'\\n+fsdp_config: {}\\n+machine_rank: 0\\n+main_training_function: main\\n+megatron_lm_config: {}\\n+num_machines: 1\\n+num_processes: 4\\n+rdzv_backend: static\\n+same_network: true\\n+use_cpu: false\\n+```\\n+\\n+3. Output of `accelerate launch test.py`:\\n+\\n+```bash\\n+Distributed environment: DEEPSPEED Backend: nccl\\n+Num processes: 4\\n+Process index: 0\\n+Local process index: 0\\n+Device: cuda:0\\n+Mixed precision type: bf16\\n+ds_config: {\\'bf16\\': {\\'enabled\\': True}, \\'zero_optimization\\': {\\'stage\\': 3, \\'stage3_gather_16bit_weights_on_model_save\\': False, \\'offload_optimizer\\': {\\'device\\': \\'none\\'}, \\'offload_param\\': {\\'device\\': \\'none\\'}}, \\'gradient_clipping\\': 1.0, \\'train_batch_size\\': \\'auto\\', \\'train_micro_batch_size_per_gpu\\': \\'auto\\', \\'gradient_accumulation_steps\\': 10, \\'steps_per_print\\': inf, \\'fp16\\': {\\'enabled\\': False}}\\n+```\\n+\\n+**Scenario 3**: Setting the `accelerate launch` cmd args related to deepspeed as `auto` in `deepspeed_config_file` and check that things work as expected.\\n+\\n+1. New `ds_config.json` with `auto` for the `accelerate launch` deepspeed cmd args:',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\n**Note**: Remaining `\"auto\"` values are handled in `accelerator.prepare()` call as explained in point 2 of \\r\\n```',\n", - " 'diff_hunk': '@@ -395,6 +395,194 @@ We will look at the changes needed in the code when using these.\\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. \\n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\\n \\n+**Things to note when using DeepSpeed Config File**\\n+\\n+Below is a smaple script using `deepspeed_config_file` in different scenarios.\\n+\\n+Code `test.py`:\\n+\\n+```python\\n+from accelerate import Accelerator\\n+from accelerate.state import AcceleratorState\\n+\\n+\\n+def main():\\n+ accelerator = Accelerator()\\n+ accelerator.print(f\"{AcceleratorState()}\")\\n+\\n+\\n+if __name__ == \"__main__\":\\n+ main()\\n+```\\n+\\n+**Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\\n+\\n+1. `accelerate config`:\\n+\\n+```yaml\\n+command_file: null\\n+commands: null\\n+compute_environment: LOCAL_MACHINE\\n+deepspeed_config:\\n+ gradient_accumulation_steps: 1\\n+ gradient_clipping: 1.0\\n+ offload_optimizer_device: \\'cpu\\'\\n+ offload_param_device: \\'cpu\\'\\n+ zero3_init_flag: true\\n+ zero3_save_16bit_model: true\\n+ zero_stage: 3\\n+ deepspeed_config_file: \\'ds_config.json\\'\\n+distributed_type: DEEPSPEED\\n+downcast_bf16: \\'no\\'\\n+dynamo_backend: \\'NO\\'\\n+fsdp_config: {}\\n+gpu_ids: null\\n+machine_rank: 0\\n+main_process_ip: null\\n+main_process_port: null\\n+main_training_function: main\\n+megatron_lm_config: {}\\n+num_machines: 1\\n+num_processes: 2\\n+rdzv_backend: static\\n+same_network: true\\n+tpu_name: null\\n+tpu_zone: null\\n+use_cpu: false\\n+```\\n+\\n+2. 
`ds_config.json`:\\n+\\n+```json\\n+{\\n+ \"bf16\": {\\n+ \"enabled\": true\\n+ },\\n+ \"zero_optimization\": {\\n+ \"stage\": 3,\\n+ \"stage3_gather_16bit_weights_on_model_save\": false,\\n+ \"offload_optimizer\": {\\n+ \"device\": \"none\"\\n+ },\\n+ \"offload_param\": {\\n+ \"device\": \"none\"\\n+ }\\n+ },\\n+ \"gradient_clipping\": 1.0,\\n+ \"train_batch_size\": \"auto\",\\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\\n+ \"gradient_accumulation_steps\": 10,\\n+ \"steps_per_print\": 2000000\\n+}\\n+```\\n+\\n+3. Output of `accelerate launch test.py`:\\n+\\n+```bash\\n+ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored: \\n+[\\'gradient_accumulation_steps\\', \\'gradient_clipping\\', \\'zero_stage\\', \\'offload_optimizer_device\\', \\'offload_param_device\\', \\n+\\'zero3_save_16bit_model\\', \\'mixed_precision\\'].\\n+Please specify them appropriately in the DeepSpeed config file.\\n+If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\\n+The easiest method is to create a new config following the questionnaire via `accelerate config`.\\n+It will only ask for the necessary config variables when using `deepspeed_config_file`.\\n+```\\n+\\n+**Scenario 2**: Use the solution of the error to create new accelerate config and check that no ambiguity error is now thrown.\\n+\\n+1. Run `accelerate config`:\\n+\\n+```bash\\n+$ accelerate config\\n+-------------------------------------------------------------------------------------------------------------------------------In which compute environment are you running?\\n+This machine \\n+-------------------------------------------------------------------------------------------------------------------------------Which type of machine are you using? \\n+multi-GPU \\n+How many different machines will you use (use more than 1 for multi-node training)? [1]: \\n+Do you wish to optimize your script with torch dynamo?[yes/NO]: \\n+Do you want to use DeepSpeed? [yes/NO]: yes \\n+Do you want to specify a json file to a DeepSpeed config? [yes/NO]: yes \\n+Please enter the path to the json DeepSpeed config file: ds_config.json \\n+Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: yes\\n+How many GPU(s) should be used for distributed training? [1]:4\\n+accelerate configuration saved at ds_config_sample.yaml\\n+```\\n+\\n+2. `accelerate config`:\\n+\\n+```yaml\\n+compute_environment: LOCAL_MACHINE\\n+deepspeed_config:\\n+ deepspeed_config_file: ds_config.json\\n+ zero3_init_flag: true\\n+distributed_type: DEEPSPEED\\n+downcast_bf16: \\'no\\'\\n+dynamo_backend: \\'NO\\'\\n+fsdp_config: {}\\n+machine_rank: 0\\n+main_training_function: main\\n+megatron_lm_config: {}\\n+num_machines: 1\\n+num_processes: 4\\n+rdzv_backend: static\\n+same_network: true\\n+use_cpu: false\\n+```\\n+\\n+3. 
Output of `accelerate launch test.py`:\\n+\\n+```bash\\n+Distributed environment: DEEPSPEED Backend: nccl\\n+Num processes: 4\\n+Process index: 0\\n+Local process index: 0\\n+Device: cuda:0\\n+Mixed precision type: bf16\\n+ds_config: {\\'bf16\\': {\\'enabled\\': True}, \\'zero_optimization\\': {\\'stage\\': 3, \\'stage3_gather_16bit_weights_on_model_save\\': False, \\'offload_optimizer\\': {\\'device\\': \\'none\\'}, \\'offload_param\\': {\\'device\\': \\'none\\'}}, \\'gradient_clipping\\': 1.0, \\'train_batch_size\\': \\'auto\\', \\'train_micro_batch_size_per_gpu\\': \\'auto\\', \\'gradient_accumulation_steps\\': 10, \\'steps_per_print\\': inf, \\'fp16\\': {\\'enabled\\': False}}\\n+```\\n+\\n+**Scenario 3**: Setting the `accelerate launch` cmd args related to deepspeed as `auto` in `deepspeed_config_file` and check that things work as expected.\\n+\\n+1. New `ds_config.json` with `auto` for the `accelerate launch` deepspeed cmd args:\\n+\\n+```json\\n+{\\n+ \"bf16\": {\\n+ \"enabled\": \"auto\"\\n+ },\\n+ \"zero_optimization\": {\\n+ \"stage\": \"auto\",\\n+ \"stage3_gather_16bit_weights_on_model_save\": \"auto\",\\n+ \"offload_optimizer\": {\\n+ \"device\": \"auto\"\\n+ },\\n+ \"offload_param\": {\\n+ \"device\": \"auto\"\\n+ }\\n+ },\\n+ \"gradient_clipping\": \"auto\",\\n+ \"train_batch_size\": \"auto\",\\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\\n+ \"gradient_accumulation_steps\": \"auto\",\\n+ \"steps_per_print\": 2000000\\n+}\\n+```\\n+\\n+2. Output of `accelerate launch --mixed_precision=\"fp16\" --zero_stage=3 --gradient_accumulation_steps=5 --gradient_clipping=1.0 --offload_param_device=\"cpu\" --offload_optimizer_device=\"nvme\" --zero3_save_16bit_model=\"true\" test.py`:\\n+\\n+```bash\\n+Distributed environment: DEEPSPEED Backend: nccl\\n+Num processes: 4\\n+Process index: 0\\n+Local process index: 0\\n+Device: cuda:0\\n+Mixed precision type: fp16\\n+ds_config: {\\'bf16\\': {\\'enabled\\': False}, \\'zero_optimization\\': {\\'stage\\': 3, \\'stage3_gather_16bit_weights_on_model_save\\': True, \\'offload_optimizer\\': {\\'device\\': \\'nvme\\'}, \\'offload_param\\': {\\'device\\': \\'cpu\\'}}, \\'gradient_clipping\\': 1.0, \\'train_batch_size\\': \\'auto\\', \\'train_micro_batch_size_per_gpu\\': \\'auto\\', \\'gradient_accumulation_steps\\': 5, \\'steps_per_print\\': inf, \\'fp16\\': {\\'enabled\\': True, \\'auto_cast\\': True}}\\n+```\\n+\\n+**Note**: Remaining `auto` values are handled in `accelerator.prepare()` call as explained in point 2 of ',\n", - " 'from_author': False},\n", - " {'body': 'Why is this set to True by default now?',\n", - " 'diff_hunk': '@@ -450,15 +461,17 @@ def __post_init__(self):\\n if self.gradient_clipping:\\n config[\"gradient_clipping\"] = self.gradient_clipping\\n self.hf_ds_config = HfDeepSpeedConfig(config)\\n+\\n self.deepspeed_config = self.hf_ds_config.config\\n self.deepspeed_config[\"steps_per_print\"] = float(\"inf\") # this will stop deepspeed from logging @ stdout\\n if self.zero3_init_flag is None:\\n- self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"\\n+ self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"true\") == \"true\"',\n", - " 'from_author': False},\n", - " {'body': 'these are the exact outputs from the new config #830. Added new lines. ',\n", - " 'diff_hunk': '@@ -395,6 +395,194 @@ We will look at the changes needed in the code when using these.\\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. 
\\n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\\n \\n+**Things to note when using DeepSpeed Config File**\\n+\\n+Below is a smaple script using `deepspeed_config_file` in different scenarios.\\n+\\n+Code `test.py`:\\n+\\n+```python\\n+from accelerate import Accelerator\\n+from accelerate.state import AcceleratorState\\n+\\n+\\n+def main():\\n+ accelerator = Accelerator()\\n+ accelerator.print(f\"{AcceleratorState()}\")\\n+\\n+\\n+if __name__ == \"__main__\":\\n+ main()\\n+```\\n+\\n+**Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\\n+\\n+1. `accelerate config`:\\n+\\n+```yaml\\n+command_file: null\\n+commands: null\\n+compute_environment: LOCAL_MACHINE\\n+deepspeed_config:\\n+ gradient_accumulation_steps: 1\\n+ gradient_clipping: 1.0\\n+ offload_optimizer_device: \\'cpu\\'\\n+ offload_param_device: \\'cpu\\'\\n+ zero3_init_flag: true\\n+ zero3_save_16bit_model: true\\n+ zero_stage: 3\\n+ deepspeed_config_file: \\'ds_config.json\\'\\n+distributed_type: DEEPSPEED\\n+downcast_bf16: \\'no\\'\\n+dynamo_backend: \\'NO\\'\\n+fsdp_config: {}\\n+gpu_ids: null\\n+machine_rank: 0\\n+main_process_ip: null\\n+main_process_port: null\\n+main_training_function: main\\n+megatron_lm_config: {}\\n+num_machines: 1\\n+num_processes: 2\\n+rdzv_backend: static\\n+same_network: true\\n+tpu_name: null\\n+tpu_zone: null\\n+use_cpu: false\\n+```\\n+\\n+2. `ds_config.json`:\\n+\\n+```json\\n+{\\n+ \"bf16\": {\\n+ \"enabled\": true\\n+ },\\n+ \"zero_optimization\": {\\n+ \"stage\": 3,\\n+ \"stage3_gather_16bit_weights_on_model_save\": false,\\n+ \"offload_optimizer\": {\\n+ \"device\": \"none\"\\n+ },\\n+ \"offload_param\": {\\n+ \"device\": \"none\"\\n+ }\\n+ },\\n+ \"gradient_clipping\": 1.0,\\n+ \"train_batch_size\": \"auto\",\\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\\n+ \"gradient_accumulation_steps\": 10,\\n+ \"steps_per_print\": 2000000\\n+}\\n+```\\n+\\n+3. Output of `accelerate launch test.py`:\\n+\\n+```bash\\n+ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored: \\n+[\\'gradient_accumulation_steps\\', \\'gradient_clipping\\', \\'zero_stage\\', \\'offload_optimizer_device\\', \\'offload_param_device\\', \\n+\\'zero3_save_16bit_model\\', \\'mixed_precision\\'].\\n+Please specify them appropriately in the DeepSpeed config file.\\n+If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\\n+The easiest method is to create a new config following the questionnaire via `accelerate config`.\\n+It will only ask for the necessary config variables when using `deepspeed_config_file`.\\n+```\\n+\\n+**Scenario 2**: Use the solution of the error to create new accelerate config and check that no ambiguity error is now thrown.\\n+\\n+1. Run `accelerate config`:\\n+\\n+```bash\\n+$ accelerate config\\n+-------------------------------------------------------------------------------------------------------------------------------In which compute environment are you running?\\n+This machine \\n+-------------------------------------------------------------------------------------------------------------------------------Which type of machine are you using? 
',\n", - " 'from_author': True},\n", - " {'body': \"Hello, as stas mentioned in Issue #922 \\r\\n\\r\\n> this flag should be True by default as zero stage 3 is for large models, it's very unlikely the user will be able to load those models w/o zero.Init\",\n", - " 'diff_hunk': '@@ -450,15 +461,17 @@ def __post_init__(self):\\n if self.gradient_clipping:\\n config[\"gradient_clipping\"] = self.gradient_clipping\\n self.hf_ds_config = HfDeepSpeedConfig(config)\\n+\\n self.deepspeed_config = self.hf_ds_config.config\\n self.deepspeed_config[\"steps_per_print\"] = float(\"inf\") # this will stop deepspeed from logging @ stdout\\n if self.zero3_init_flag is None:\\n- self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"\\n+ self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"true\") == \"true\"',\n", - " 'from_author': True},\n", - " {'body': 'Is this only used if zero3 is enabled in the rest of the code? It should at least only default to True when Zero-3 is enabled otherwise.',\n", - " 'diff_hunk': '@@ -450,15 +461,17 @@ def __post_init__(self):\\n if self.gradient_clipping:\\n config[\"gradient_clipping\"] = self.gradient_clipping\\n self.hf_ds_config = HfDeepSpeedConfig(config)\\n+\\n self.deepspeed_config = self.hf_ds_config.config\\n self.deepspeed_config[\"steps_per_print\"] = float(\"inf\") # this will stop deepspeed from logging @ stdout\\n if self.zero3_init_flag is None:\\n- self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"\\n+ self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"true\") == \"true\"',\n", - " 'from_author': False},\n", - " {'body': 'Yes, only used when zero3 is enabled else will give a warning and gets set to False as mentioned in on of the above threads. 
Done, defaults to True only for stage 3.',\n", - " 'diff_hunk': '@@ -450,15 +461,17 @@ def __post_init__(self):\\n if self.gradient_clipping:\\n config[\"gradient_clipping\"] = self.gradient_clipping\\n self.hf_ds_config = HfDeepSpeedConfig(config)\\n+\\n self.deepspeed_config = self.hf_ds_config.config\\n self.deepspeed_config[\"steps_per_print\"] = float(\"inf\") # this will stop deepspeed from logging @ stdout\\n if self.zero3_init_flag is None:\\n- self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"\\n+ self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"true\") == \"true\"',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'I tried this branch, getting:\\r\\n\\r\\n```\\r\\nTraceback (most recent call last):\\r\\n File \"m4/training/main.py\", line 47, in \\r\\n accelerator = Accelerator(\\r\\n File \"/mnt/nvme0/code/huggingface/accelerate-master/src/accelerate/accelerator.py\", line 246, in __init__\\r\\n DeepSpeedPlugin() if os.environ.get(\"ACCELERATE_USE_DEEPSPEED\", \"false\") == \"true\" else None\\r\\n File \"\", line 12, in __init__\\r\\n File \"/mnt/nvme0/code/huggingface/accelerate-master/src/accelerate/utils/dataclasses.py\", line 412, in __post_init__\\r\\n self._deepspeed_config_checks()\\r\\n File \"/mnt/nvme0/code/huggingface/accelerate-master/src/accelerate/utils/dataclasses.py\", line 560, in _deepspeed_config_checks\\r\\n if ds_gradient_accumulation_steps != int(accelerate_gradient_accumulation_steps):\\r\\nValueError: invalid literal for int() with base 10: \\'None\\'\\r\\n```\\r\\n\\r\\nconfigs:\\r\\n\\r\\n```\\r\\n{\\r\\n \"fp16\": {\\r\\n \"enabled\": true,\\r\\n \"auto_cast\": true,\\r\\n \"loss_scale\": 0.0,\\r\\n \"initial_scale_power\": 10,\\r\\n \"loss_scale_window\": 1000,\\r\\n \"hysteresis\": 2,\\r\\n \"min_loss_scale\": 1\\r\\n },\\r\\n \"zero_optimization\": {\\r\\n \"stage\": 3,\\r\\n \"allgather_partitions\": true,\\r\\n \"allgather_bucket_size\": 5e8,\\r\\n \"overlap_comm\": false,\\r\\n \"reduce_scatter\": true,\\r\\n \"reduce_bucket_size\": \"auto\",\\r\\n \"contiguous_gradients\": true,\\r\\n \"stage3_gather_16bit_weights_on_model_save\": false,\\r\\n \"offload_optimizer\": {\\r\\n \"device\": \"none\"\\r\\n },\\r\\n \"offload_param\": {\\r\\n \"device\": \"none\"\\r\\n }\\r\\n },\\r\\n \"gradient_clipping\": 1.0,\\r\\n \"gradient_accumulation_steps\": 2,\\r\\n \"train_batch_size\": \"auto\",\\r\\n \"train_micro_batch_size_per_gpu\": \"auto\",\\r\\n \"steps_per_print\": 2000000\\r\\n}\\r\\n\\r\\ncompute_environment: LOCAL_MACHINE\\r\\ndeepspeed_config:\\r\\n deepspeed_multinode_launcher: standard\\r\\n deepspeed_config_file: ./configs/vopt-large-z3/ds_config.json\\r\\n zero3_init_flag: true\\r\\ndistributed_type: DEEPSPEED\\r\\nfsdp_config: {}\\r\\nmachine_rank: 0\\r\\nmain_process_ip: null\\r\\nmain_process_port: null\\r\\nmain_training_function: main\\r\\nnum_machines: 1\\r\\nnum_processes: 1\\r\\n#num_machines: 20\\r\\n#num_processes: 80\\r\\nuse_cpu: false\\r\\n\\r\\n\\r\\n```',\n", - " 'from_author': False},\n", - " {'body': '@pacman100 default needs to be 1 instead of `None` :)',\n", - " 'from_author': False},\n", - " {'body': \"Also clarifying defaults for args in this PR. 
Now, `accelerate launch --use_deepspeed --help` shows below output wherein default value info is specified `If unspecified, will default to *`:\\r\\n```\\r\\n...\\r\\n\\r\\nDeepSpeed Arguments:\\r\\n Arguments related to DeepSpeed.\\r\\n\\r\\n --deepspeed_config_file DEEPSPEED_CONFIG_FILE\\r\\n DeepSpeed config file.\\r\\n --zero_stage ZERO_STAGE\\r\\n DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag\\r\\n is passed). If unspecified, will default to `2`.\\r\\n --offload_optimizer_device OFFLOAD_OPTIMIZER_DEVICE\\r\\n Decides where (none|cpu|nvme) to offload optimizer states (useful only\\r\\n when `use_deepspeed` flag is passed). If unspecified, will default to\\r\\n `none`.\\r\\n --offload_param_device OFFLOAD_PARAM_DEVICE\\r\\n Decides where (none|cpu|nvme) to offload parameters (useful only when\\r\\n `use_deepspeed` flag is passed). If unspecified, will default to `none`.\\r\\n --gradient_accumulation_steps GRADIENT_ACCUMULATION_STEPS\\r\\n No of gradient_accumulation_steps used in your training script (useful\\r\\n only when `use_deepspeed` flag is passed). If unspecified, will default to\\r\\n `1`.\\r\\n --gradient_clipping GRADIENT_CLIPPING\\r\\n gradient clipping value used in your training script (useful only when\\r\\n `use_deepspeed` flag is passed). If unspecified, will default to `1.0`.\\r\\n --zero3_init_flag ZERO3_INIT_FLAG\\r\\n Decides Whether (true|false) to enable `deepspeed.zero.Init` for\\r\\n constructing massive models. Only applicable with DeepSpeed ZeRO Stage-3.\\r\\n If unspecified, will default to `true`.\\r\\n --zero3_save_16bit_model ZERO3_SAVE_16BIT_MODEL\\r\\n Decides Whether (true|false) to save 16-bit model weights when using ZeRO\\r\\n Stage-3. Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will\\r\\n default to `false`.\\r\\n --deepspeed_hostfile DEEPSPEED_HOSTFILE\\r\\n DeepSpeed hostfile for configuring multi-node compute resources.\\r\\n --deepspeed_exclusion_filter DEEPSPEED_EXCLUSION_FILTER\\r\\n DeepSpeed exclusion filter string when using mutli-node setup.\\r\\n --deepspeed_inclusion_filter DEEPSPEED_INCLUSION_FILTER\\r\\n DeepSpeed inclusion filter string when using mutli-node setup.\\r\\n --deepspeed_multinode_launcher DEEPSPEED_MULTINODE_LAUNCHER\\r\\n DeepSpeed multi-node launcher to use. If unspecified, will default to\\r\\n `pdsh`.\\r\\n\\r\\n```\",\n", - " 'from_author': True},\n", - " {'body': \"> ```shell\\r\\n> ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be \\r\\n> ignored: ['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', \\r\\n> 'offload_optimizer_device', 'offload_param_device', 'zero3_save_16bit_model', 'mixed_precision'].\\r\\n> Please specify them appropriately in the DeepSpeed config file.\\r\\n> If you are using accelerate config file, set `mixed_precision=no` and remove others config variables\\r\\n> mentioned in the above specified list; else don't specify these config variables in `accelerate \\r\\n> launch` command. \\r\\n> The easiest method is to create new config following the questionnaire via `accelerate config`.\\r\\n> It will only ask for the necessary config variables when using `deepspeed_config_file`.\\r\\n> ```\\r\\n\\r\\nNow, the error will be this\",\n", - " 'from_author': True},\n", - " {'body': '> but really what we want is one definitive source of information.\\r\\n\\r\\nHello @stas00, let us know if this addresses the issue. 
When `deepspeed_config_file` is specified, it is the single definitive source of information and error is raised when duplicates are found either in accelerate config file or through the arguments of `accelerate launch` command',\n", - " 'from_author': True},\n", - " {'body': \"> Will this still work with `auto` entries in `ds_config.json`\\r\\n\\r\\nHello Stas, they will work as before because nothing is changing other than the fact that an error is raised if their is any duplicate entry in `accelerate config` with things that are in `deepspeed_config_file`. Here, these fields which can be duplicates are never `auto` in either case and hence fields that had `auto` support won't be impacted.\",\n", - " 'from_author': True},\n", - " {'body': \"Thank you for confirming that, @pacman100 - let me try it out as it's easier to see things in action.\",\n", - " 'from_author': False},\n", - " {'body': 'But this shows otherwise:\\r\\n```\\r\\n File \"/mnt/nvme0/code/huggingface/accelerate-master/src/accelerate/utils/dataclasses.py\", line 409, in __post_init__\\r\\n raise ValueError(\"gradient_accumulation_steps cannot be set to \\'auto\\' in the DeepSpeed config.\")\\r\\nValueError: gradient_accumulation_steps cannot be set to \\'auto\\' in the DeepSpeed config.\\r\\nERROR:torch.distributed.elastic.multiprocessing.api:failed (exitcode: 1) local_rank: 0 (pid\\r\\n```\\r\\n\\r\\nwhy can\\'t it be `auto`? I\\'d expect it to be set either (1) to the default value or (2) the value passed via the `--gradient_accumulation_steps` cmd line arg.\\r\\n\\r\\nI wonder if we are having a miscommunication here. I brought up the issue of duplicity of the 2 styles of settings the ds config, since accelerate used its own config files from the beginning, but I have never suggested that setting values via cmd line args support should be dropped.',\n", - " 'from_author': False},\n", - " {'body': \"Regarding `gradient_accumulation_steps` and `auto`, that piece of code wasn't changed at all and behaviour is the same as before. If one doesn't specify that entry in the config file, it is set to default of `1`, if it is `auto` error is raised else thale given value is used. When not using `deepspeed_config_file`, it is asked when using `accelerate config` cmd.\",\n", - " 'from_author': True},\n", - " {'body': '> Regarding `gradient_accumulation_steps` and `auto`, that piece of code wasn\\'t changed at all and behaviour is the same as before. If one doesn\\'t specify that entry in the config file, it is set to default of `1`, if it is `auto` error is raised else thale given value is used. When not using `deepspeed_config_file`, it is asked when using `accelerate config` cmd.\\r\\n\\r\\nOK, so your logic is different from HF Trainer then.\\r\\n\\r\\nThe HF Trainer was directing cmd args into the config file\\'s `auto` values so that the user could override them via cmd line args.\\r\\n\\r\\nI\\'m not saying the 2 logics have to match. If I am not mistaken the accelerate logic is less flexible, but it\\'s ok if you prefer it that way. \\r\\n\\r\\nIn HF Trainer the `auto` feature was designed to be used:\\r\\n1. when the value can\\'t be known before running - \"boot\"-time calculated configs\\r\\n2. 
values to be set via cmd line args and defaults',\n", - " 'from_author': False},\n", - " {'body': \"As accelerate is meant to work with all models apart from Transformers and user being in control of the training loop, they are in charge of all the arguments and the naming convention of arguments will be different across different users. On the other hand, in Trainer, users are restricted to a given args set and as such those can be used to fill the DeepSpeed config due to clear mapping between args and DS config params. The idea is that artifacts sent to `accelerator.prepare` have the params required by DS config and we exactly know the mapping between them and makes filling of the params independent of the user's training loop and their arguments naming convention.\\n\\nIn accelerate, the `auto` values are those that can be filled via artifacts being sent to `accelerator.prepare` as all the other places, the user has complete control over the training loop, argument naming and filling. The user still has flexibility to fill in all the `auto` values themselves as mentioned here https://github.com/huggingface/accelerate/pull/676#issuecomment-1318139353\",\n", - " 'from_author': True},\n", - " {'body': \"> As accelerate is meant to work with all models apart from Transformers and user being in control of the training loop, they are in charge of all the arguments and the naming convention of arguments will be different across different users. On the other hand, in Trainer, users are restricted to a given args set and as such those can be used to fill the DeepSpeed config due to clear mapping between args and DS config params. The idea is that artifacts sent to `accelerator.prepare` have the params required by DS config and we exactly know the mapping between them and makes filling of the params independent of the user's training loop and their arguments naming convention.\\r\\n\\r\\nThank you for explaining this to me, Sourab, but I'm having a hard time following how Accelerate is any different from HF Trainer wrt sending cmd line arg values to the unfilled out config values in ds_config. e.g. the Accelerate launcher provides an explicit list of cmd line args for the deepspeed use. There is a 1:1 mapping here as well. Could you please explain how is this different from the HF Trainer?\\r\\n\\r\\nBut as I said above it's totally fine if you prefer to do it this way, Sourab. This doesn't prevent users from doing what they need.\\r\\n\\r\\n> In accelerate, the auto values are those that can be filled via artifacts being sent to accelerator.prepare as all the other places, the user has complete control over the training loop, argument naming and filling. The user still has flexibility to fill in all the auto values themselves as mentioned here https://github.com/huggingface/accelerate/pull/676#issuecomment-1318139353\\r\\n\\r\\nUnderstood. more work, but doable. Thank you for the explanations.\\r\\n\\r\\n\",\n", - " 'from_author': False},\n", - " {'body': '> Could you please explain how is this different from the HF Trainer?\\r\\n\\r\\nUsers can have `bs` or `batch_size` as cmd arguments in their code and as such we can\\'t fill ds config\\'s `train_micro_batch_size_per_gpu` whereas Trainer always maps `args.per_device_train_batch_size` to it. The same reason can go for other configs. \\r\\n\\r\\nPlease note that `accelerate launch` cmd args are primarily used for setting accelerate config\\'s deespseed fields rather than setting `auto` values of `deepspeed_config_file`. 
Now I understood that you meant using `accelerate launch` cmd args for filling in `auto` values of `deepspeed_config_file` and I\\'ve made respective changes.\\r\\n\\r\\nLatest changes:\\r\\n\\r\\nCode `test.py`:\\r\\n```python\\r\\nfrom accelerate import Accelerator\\r\\nfrom accelerate.state import AcceleratorState\\r\\n\\r\\ndef main():\\r\\n accelerator = Accelerator()\\r\\n accelerator.print(f\"{AcceleratorState()}\")\\r\\nif __name__ == \"__main__\":\\r\\n main()\\r\\n```\\r\\n\\r\\nScenario 1: manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\\r\\n\\r\\n1. `accelerate config`:\\r\\n```yaml\\r\\ncommand_file: null\\r\\ncommands: null\\r\\ncompute_environment: LOCAL_MACHINE\\r\\ndeepspeed_config:\\r\\n gradient_accumulation_steps: 1\\r\\n gradient_clipping: 1.0\\r\\n offload_optimizer_device: \\'cpu\\'\\r\\n offload_param_device: \\'cpu\\'\\r\\n zero3_init_flag: true\\r\\n zero3_save_16bit_model: true\\r\\n zero_stage: 3\\r\\n deepspeed_config_file: \\'ds_config.json\\'\\r\\ndistributed_type: DEEPSPEED\\r\\ndowncast_bf16: \\'no\\'\\r\\ndynamo_backend: \\'NO\\'\\r\\nfsdp_config: {}\\r\\ngpu_ids: null\\r\\nmachine_rank: 0\\r\\nmain_process_ip: null\\r\\nmain_process_port: null\\r\\nmain_training_function: main\\r\\nmegatron_lm_config: {}\\r\\nnum_machines: 1\\r\\nnum_processes: 2\\r\\nrdzv_backend: static\\r\\nsame_network: true\\r\\ntpu_name: null\\r\\ntpu_zone: null\\r\\nuse_cpu: false\\r\\n```\\r\\n\\r\\n2. `ds_config.json`:\\r\\n```json\\r\\n{\\r\\n \"bf16\": {\\r\\n \"enabled\": true\\r\\n },\\r\\n \"zero_optimization\": {\\r\\n \"stage\": 3,\\r\\n \"stage3_gather_16bit_weights_on_model_save\": false,\\r\\n \"offload_optimizer\": {\\r\\n \"device\": \"none\"\\r\\n },\\r\\n \"offload_param\": {\\r\\n \"device\": \"none\"\\r\\n }\\r\\n },\\r\\n \"gradient_clipping\": 1.0,\\r\\n \"train_batch_size\": \"auto\",\\r\\n \"train_micro_batch_size_per_gpu\": \"auto\",\\r\\n \"gradient_accumulation_steps\": 10,\\r\\n \"steps_per_print\": 2000000\\r\\n}\\r\\n```\\r\\n\\r\\n3. Output of `accelerate launch test.py`:\\r\\n```\\r\\nValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored: \\r\\n[\\'gradient_accumulation_steps\\', \\'gradient_clipping\\', \\'zero_stage\\', \\'offload_optimizer_device\\', \\'offload_param_device\\', \\r\\n\\'zero3_save_16bit_model\\', \\'mixed_precision\\'].\\r\\nPlease specify them appropriately in the DeepSpeed config file.\\r\\nIf you are using an accelerate config file, remove others config variables mentioned in the above specified list.\\r\\nThe easiest method is to create a new config following the questionnaire via `accelerate config`.\\r\\nIt will only ask for the necessary config variables when using `deepspeed_config_file`.\\r\\n```\\r\\n\\r\\nScenario 2: Use the solution of the error to create new accelerate config and check that no ambiguity error is now thrown.\\r\\n1. Run `accelerate config`:\\r\\n```\\r\\n$ accelerate config\\r\\n-------------------------------------------------------------------------------------------------------------------------------In which compute environment are you running?\\r\\nThis machine \\r\\n-------------------------------------------------------------------------------------------------------------------------------Which type of machine are you using? \\r\\nmulti-GPU \\r\\nHow many different machines will you use (use more than 1 for multi-node training)? 
[1]: \\r\\nDo you wish to optimize your script with torch dynamo?[yes/NO]: \\r\\nDo you want to use DeepSpeed? [yes/NO]: yes \\r\\nDo you want to specify a json file to a DeepSpeed config? [yes/NO]: yes \\r\\nPlease enter the path to the json DeepSpeed config file: ds_config.json \\r\\nDo you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: yes\\r\\nHow many GPU(s) should be used for distributed training? [1]:4\\r\\naccelerate configuration saved at ds_config_sample.yaml\\r\\n```\\r\\n\\r\\n2. `accelerate config`:\\r\\n```yaml\\r\\ncompute_environment: LOCAL_MACHINE\\r\\ndeepspeed_config:\\r\\n deepspeed_config_file: ds_config.json\\r\\n zero3_init_flag: true\\r\\ndistributed_type: DEEPSPEED\\r\\ndowncast_bf16: \\'no\\'\\r\\ndynamo_backend: \\'NO\\'\\r\\nfsdp_config: {}\\r\\nmachine_rank: 0\\r\\nmain_training_function: main\\r\\nmegatron_lm_config: {}\\r\\nnum_machines: 1\\r\\nnum_processes: 4\\r\\nrdzv_backend: static\\r\\nsame_network: true\\r\\nuse_cpu: false\\r\\n```\\r\\n\\r\\n3. Output of `accelerate launch test.py`:\\r\\n```\\r\\nDistributed environment: DEEPSPEED Backend: nccl\\r\\nNum processes: 4\\r\\nProcess index: 0\\r\\nLocal process index: 0\\r\\nDevice: cuda:0\\r\\nMixed precision type: bf16\\r\\nds_config: {\\'bf16\\': {\\'enabled\\': True}, \\'zero_optimization\\': {\\'stage\\': 3, \\'stage3_gather_16bit_weights_on_model_save\\': False, \\'offload_optimizer\\': {\\'device\\': \\'none\\'}, \\'offload_param\\': {\\'device\\': \\'none\\'}}, \\'gradient_clipping\\': 1.0, \\'train_batch_size\\': \\'auto\\', \\'train_micro_batch_size_per_gpu\\': \\'auto\\', \\'gradient_accumulation_steps\\': 10, \\'steps_per_print\\': inf, \\'fp16\\': {\\'enabled\\': False}}\\r\\n```\\r\\n\\r\\nScenario 3: Setting the `accelerate launch` cmd args related to deepspeed as `auto` in `deepspeed_config_file` and check that things work as expected.\\r\\n1. new `ds_config.json` with `auto` for the `accelerate launch` deepspeed cmd args:\\r\\n```json\\r\\n{\\r\\n \"bf16\": {\\r\\n \"enabled\": \"auto\"\\r\\n },\\r\\n \"zero_optimization\": {\\r\\n \"stage\": \"auto\",\\r\\n \"stage3_gather_16bit_weights_on_model_save\": \"auto\",\\r\\n \"offload_optimizer\": {\\r\\n \"device\": \"auto\"\\r\\n },\\r\\n \"offload_param\": {\\r\\n \"device\": \"auto\"\\r\\n }\\r\\n },\\r\\n \"gradient_clipping\": \"auto\",\\r\\n \"train_batch_size\": \"auto\",\\r\\n \"train_micro_batch_size_per_gpu\": \"auto\",\\r\\n \"gradient_accumulation_steps\": \"auto\",\\r\\n \"steps_per_print\": 2000000\\r\\n}\\r\\n```\\r\\n\\r\\n2. 
Output of `accelerate launch --mixed_precision=\"fp16\" --zero_stage=3 --gradient_accumulation_steps=5 --gradient_clipping=1.0 --offload_param_device=\"cpu\" --offload_optimizer_device=\"nvme\" --zero3_save_16bit_model=\"true\" test.py`:\\r\\n```bash\\r\\nDistributed environment: DEEPSPEED Backend: nccl\\r\\nNum processes: 4\\r\\nProcess index: 0\\r\\nLocal process index: 0\\r\\nDevice: cuda:0\\r\\nMixed precision type: fp16\\r\\nds_config: {\\'bf16\\': {\\'enabled\\': False}, \\'zero_optimization\\': {\\'stage\\': 3, \\'stage3_gather_16bit_weights_on_model_save\\': True, \\'offload_optimizer\\': {\\'device\\': \\'nvme\\'}, \\'offload_param\\': {\\'device\\': \\'cpu\\'}}, \\'gradient_clipping\\': 1.0, \\'train_batch_size\\': \\'auto\\', \\'train_micro_batch_size_per_gpu\\': \\'auto\\', \\'gradient_accumulation_steps\\': 5, \\'steps_per_print\\': inf, \\'fp16\\': {\\'enabled\\': True, \\'auto_cast\\': True}}\\r\\n```\\r\\n\\r\\n**Note**: Remaining `auto` values are handled in `accelerator.prepare()` call.\\r\\n\\r\\n\\r\\n',\n", - " 'from_author': True},\n", - " {'body': \"Looks fantastic, Sourab! Thank you for the improvements and taking the time to layout out the different scenarios - if I'm not mistaken those would make for perfect additions to the documentation if it resonates. (at the very least the last one to demo how `auto` values work and why would one want to use those).\\r\\n\\r\\nBTW, the config generates things like:\\r\\n```\\r\\nfsdp_config: {}\\r\\nmegatron_lm_config: {}\\r\\n```\\r\\nwhy not just skip parts that the user hasn't asked for? It just makes the config scarier than it is, no? I'm asking since when I first looked at it I wasn't a all sure which of the empty placeholders were safe to remove and which aren't. My personal preference is for active config - that is to only ever list config entries that I work with and any defaults should be just that defaults and not be listed at all. Which I suppose isn't the case with typical configs where everything is listed out whether it's being used or not.\\r\\n\\r\\nAnd I can of course remove all those, so definitely it's not an issue, I'm just asking if my thinking resonates with you.\",\n", - " 'from_author': False},\n", - " {'body': \"Sourab, I found one more ambiguous combo in one of our tests:\\r\\n\\r\\n```\\r\\n zero3_init_flag: true\\r\\n zero_stage: 2\\r\\n```\\r\\n\\r\\nThis combo is quietly getting accepted. I'm concerned that a developer may see `zero3_init_flag: true` and think it's zero3?\\r\\n\\r\\nDo you think accelerate should assert when `zero3_init_flag==True` is used with `stage < 3`?\",\n", - " 'from_author': False},\n", - " {'body': 'Hello @stas00, with current setup below warning is given which I think is fine:\\r\\n```bash\\r\\nUserWarning: DeepSpeed Zero3 Init flag is only applicable for ZeRO Stage 3. Setting it to False.\\r\\n```',\n", - " 'from_author': True},\n", - " {'body': 'oh boy. I didn\\'t see it. 
:(\\r\\n\\r\\nIf a tree falls in a forest and no one is around to hear it, does it make a sound?\\r\\n\\r\\nI guess I need to start using this pragma to turn warnings into errors, but then some warnings can\\'t be acted upon :(\\r\\n\\r\\n```\\r\\nimport warnings\\r\\nwarnings.filterwarnings(\"error\")\\r\\n```\\r\\n\\r\\nThis is for example an even larger issue for tests, where distributed setup hides most warnings or again there are too many of warnings to see anything.\\r\\n',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/941'},\n", - " 1175473268: {'diff': 'diff --git a/docs/source/quicktour.mdx b/docs/source/quicktour.mdx\\nindex 6ea99f9bd..13a8d7afa 100644\\n--- a/docs/source/quicktour.mdx\\n+++ b/docs/source/quicktour.mdx\\n@@ -206,7 +206,7 @@ Now that this is done, you can run your script with the following command:\\n accelerate launch path_to_script.py --args_for_the_script\\n ```\\n \\n-If you stored the config file in a non-default location, you can indicate it to the launcher like his:\\n+If you stored the config file in a non-default location, you can indicate it to the launcher like this:\\n \\n ```bash\\n accelerate launch --config_file path_to_config.yaml path_to_script.py --args_for_the_script\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'Thanks for the fix! ', 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/939'},\n", - " 1174853834: {'diff': 'diff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\\nindex 8a45856cd..7dafed0e5 100644\\n--- a/src/accelerate/data_loader.py\\n+++ b/src/accelerate/data_loader.py\\n@@ -711,7 +711,9 @@ def prepare_data_loader(\\n # Need to provide batch_size as batch_sampler is None for Iterable dataset\\n if new_batch_sampler is None:\\n kwargs[\"drop_last\"] = dataloader.drop_last\\n- kwargs[\"batch_size\"] = dataloader.batch_size // num_processes if split_batches else dataloader.batch_size\\n+ kwargs[\"batch_size\"] = (\\n+ dataloader.batch_size // num_processes if split_batches and not dispatch_batches else dataloader.batch_size\\n+ )\\n \\n if dispatch_batches:\\n kwargs.pop(\"generator\")\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'Looks like the right fix to me, thanks!', 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/937'},\n", - " 1173355279: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex ff880cffd..ec5041bfa 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -473,17 +473,7 @@ def is_last_process(self):\\n \\n @property\\n def mixed_precision(self):\\n- if self.distributed_type == DistributedType.DEEPSPEED:\\n- config = self.state.deepspeed_plugin.deepspeed_config\\n- if config.get(\"fp16\", {}).get(\"enabled\", False):\\n- mixed_precision = \"fp16\"\\n- elif config.get(\"bf16\", {}).get(\"enabled\", False):\\n- mixed_precision = \"bf16\"\\n- else:\\n- mixed_precision = \"no\"\\n- else:\\n- mixed_precision = self.state.mixed_precision\\n- return mixed_precision\\n+ return self.state.mixed_precision\\n \\n def on_main_process(func):\\n \"\"\"\\ndiff --git a/src/accelerate/state.py 
b/src/accelerate/state.py\\nindex a8a5e7960..31a6d64a2 100644\\n--- a/src/accelerate/state.py\\n+++ b/src/accelerate/state.py\\n@@ -104,7 +104,7 @@ def __init__(\\n self.local_process_index = int(os.environ.get(\"LOCAL_RANK\", -1))\\n self.device = torch.device(\"cuda\", self.local_process_index)\\n torch.cuda.set_device(self.device)\\n- self.mixed_precision = mixed_precision\\n+ self._mixed_precision = mixed_precision\\n elif is_tpu_available() and not cpu:\\n self.distributed_type = DistributedType.TPU\\n self.num_processes = xm.xrt_world_size()\\n@@ -120,7 +120,7 @@ def __init__(\\n os.environ[\"XLA_USE_BF16\"] = str(1)\\n os.environ[\"XLA_DOWNCAST_BF16\"] = str(0)\\n self.downcast_bfloat = False\\n- self.mixed_precision = mixed_precision\\n+ self._mixed_precision = mixed_precision\\n elif os.environ.get(\"ACCELERATE_USE_DEEPSPEED\", \"false\") == \"true\" and not cpu:\\n assert (\\n is_deepspeed_available()\\n@@ -142,7 +142,7 @@ def __init__(\\n self.local_process_index = int(os.environ.get(\"LOCAL_RANK\", -1))\\n self.device = torch.device(\"cuda\", self.local_process_index)\\n torch.cuda.set_device(self.device)\\n- self.mixed_precision = \"no\" # deepspeed handles mixed_precision using deepspeed_config\\n+ self._mixed_precision = \"no\" # deepspeed handles mixed_precision using deepspeed_config\\n self.deepspeed_plugin = deepspeed_plugin\\n elif int(os.environ.get(\"LOCAL_RANK\", -1)) != -1 and not cpu:\\n self.distributed_type = DistributedType.MULTI_GPU\\n@@ -154,15 +154,15 @@ def __init__(\\n self.local_process_index = int(os.environ.get(\"LOCAL_RANK\", -1))\\n self.device = torch.device(\"cuda\", self.local_process_index)\\n torch.cuda.set_device(self.device)\\n- self.mixed_precision = mixed_precision\\n+ self._mixed_precision = mixed_precision\\n if os.environ.get(\"ACCELERATE_USE_FSDP\", \"false\") == \"true\":\\n self.distributed_type = DistributedType.FSDP\\n- if self.mixed_precision != \"no\":\\n- fsdp_plugin.set_mixed_precision(self.mixed_precision)\\n+ if self._mixed_precision != \"no\":\\n+ fsdp_plugin.set_mixed_precision(self._mixed_precision)\\n self.fsdp_plugin = fsdp_plugin\\n if os.environ.get(\"ACCELERATE_USE_MEGATRON_LM\", \"false\") == \"true\":\\n self.distributed_type = DistributedType.MEGATRON_LM\\n- megatron_lm_plugin.set_mixed_precision(self.mixed_precision)\\n+ megatron_lm_plugin.set_mixed_precision(self._mixed_precision)\\n self.megatron_lm_plugin = megatron_lm_plugin\\n elif get_int_from_env([\"PMI_SIZE\", \"OMPI_COMM_WORLD_SIZE\", \"MV2_COMM_WORLD_SIZE\", \"WORLD_SIZE\"], 1) > 1:\\n self.distributed_type = DistributedType.MULTI_CPU\\n@@ -204,7 +204,7 @@ def __init__(\\n self.process_index = torch.distributed.get_rank()\\n self.local_process_index = local_rank\\n self.device = torch.device(\"cpu\")\\n- self.mixed_precision = mixed_precision\\n+ self._mixed_precision = mixed_precision\\n else:\\n self.distributed_type = DistributedType.NO\\n self.num_processes = 1\\n@@ -237,9 +237,13 @@ def __init__(\\n self.device = torch.device(\"cpu\")\\n else:\\n self.device = torch.device(\"cuda\")\\n- self.mixed_precision = mixed_precision\\n+ self._mixed_precision = mixed_precision\\n \\n- if self.dynamo_backend != DynamoBackend.NO and self.mixed_precision == \"no\" and self.device.type == \"cuda\":\\n+ if (\\n+ self.dynamo_backend != DynamoBackend.NO\\n+ and self._mixed_precision == \"no\"\\n+ and self.device.type == \"cuda\"\\n+ ):\\n torch.backends.cuda.matmul.allow_tf32 = True\\n self.initialized = True\\n \\n@@ -252,17 +256,30 @@ def __repr__(self):\\n 
f\"Process index: {self.process_index}\\\\n\"\\n f\"Local process index: {self.local_process_index}\\\\n\"\\n f\"Device: {self.device}\\\\n\"\\n+ f\"Mixed precision type: {mixed_precision}\\\\n\"\\n )\\n if self.distributed_type == DistributedType.DEEPSPEED:\\n repr += f\"ds_config: {self.deepspeed_plugin.deepspeed_config}\\\\n\"\\n- else:\\n- repr += f\"Mixed precision type: {mixed_precision}\\\\n\"\\n return repr\\n \\n # For backward compatibility\\n @property\\n def use_fp16(self):\\n- return self.mixed_precision != \"no\"\\n+ return self._mixed_precision != \"no\"\\n+\\n+ @property\\n+ def mixed_precision(self):\\n+ if self.distributed_type == DistributedType.DEEPSPEED:\\n+ config = self.deepspeed_plugin.deepspeed_config\\n+ if config.get(\"fp16\", {}).get(\"enabled\", False):\\n+ mixed_precision = \"fp16\"\\n+ elif config.get(\"bf16\", {}).get(\"enabled\", False):\\n+ mixed_precision = \"bf16\"\\n+ else:\\n+ mixed_precision = \"no\"\\n+ else:\\n+ mixed_precision = self._mixed_precision\\n+ return mixed_precision\\n \\n @staticmethod\\n def _reset_state():\\n@@ -275,7 +292,7 @@ def _check_initialized(self, mixed_precision=None, cpu=None):\\n err = \"AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `{flag}` to `Accelerate()`.\"\\n if cpu and self.device.type != \"cpu\":\\n raise ValueError(err.format(flag=\"cpu=True\"))\\n- if mixed_precision is not None and mixed_precision != self.mixed_precision:\\n+ if mixed_precision is not None and mixed_precision != self._mixed_precision:\\n raise ValueError(err.format(flag=f\"mixed_precision=\\'{mixed_precision}\\'\"))\\n \\n \\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_935). All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False},\n", - " {'body': \"> This is not ideal in terms of naming. I'm wondering if we shouldn't rename the current attribute `_mixed_precision` and have the property named `mixed_precision` which would be clearer. There might be a bit of code to adapt, but I think it's worth the effort, no?\\r\\n\\r\\nYes, I thought of that but changed my mind in order to be least invasive in terms of the changes required. Addressed this in latest commit. 
Thank you.\",\n", - " 'from_author': True},\n", - " {'body': 'Example with recent changes:\\r\\n\\r\\nCode:\\r\\n```\\r\\nfrom accelerate import Accelerator\\r\\nfrom accelerate.state import AcceleratorState\\r\\n\\r\\ndef main():\\r\\n accelerator = Accelerator()\\r\\n accelerator.print(f\"{AcceleratorState()}\")\\r\\n accelerator.print(f\"{AcceleratorState().mixed_precision=}\")\\r\\n accelerator.print(f\"{accelerator.mixed_precision=}\")\\r\\n\\r\\nif __name__ == \"__main__\":\\r\\n main()\\r\\n```\\r\\n\\r\\nconfig:\\r\\n```yaml\\r\\ncommand_file: null\\r\\ncommands: null\\r\\ncompute_environment: LOCAL_MACHINE\\r\\ndeepspeed_config:\\r\\n gradient_accumulation_steps: 1\\r\\n gradient_clipping: 1.0\\r\\n offload_optimizer_device: \\'cpu\\'\\r\\n offload_param_device: \\'cpu\\'\\r\\n zero3_init_flag: true\\r\\n zero3_save_16bit_model: true\\r\\n zero_stage: 3\\r\\ndistributed_type: DEEPSPEED\\r\\ndowncast_bf16: \\'no\\'\\r\\ndynamo_backend: \\'NO\\'\\r\\nfsdp_config: {}\\r\\ngpu_ids: null\\r\\nmachine_rank: 0\\r\\nmain_process_ip: null\\r\\nmain_process_port: null\\r\\nmain_training_function: main\\r\\nmegatron_lm_config: {}\\r\\nmixed_precision: \\'bf16\\'\\r\\nnum_machines: 1\\r\\nnum_processes: 2\\r\\nrdzv_backend: static\\r\\nsame_network: true\\r\\ntpu_name: null\\r\\ntpu_zone: null\\r\\nuse_cpu: false\\r\\n```\\r\\n\\r\\nOutput\\r\\n```bash\\r\\nDistributed environment: DEEPSPEED Backend: nccl\\r\\nNum processes: 2\\r\\nProcess index: 0\\r\\nLocal process index: 0\\r\\nDevice: cuda:0\\r\\nMixed precision type: bf16\\r\\nds_config: {\\'train_batch_size\\': \\'auto\\', \\'train_micro_batch_size_per_gpu\\': \\'auto\\', \\'gradient_accumulation_steps\\': 1, \\'zero_optimization\\': {\\'stage\\': 3, \\'offload_optimizer\\': {\\'device\\': \\'cpu\\'}, \\'offload_param\\': {\\'device\\': \\'cpu\\'}, \\'stage3_gather_16bit_weights_on_model_save\\': True}, \\'gradient_clipping\\': 1.0, \\'steps_per_print\\': inf, \\'bf16\\': {\\'enabled\\': True}}\\r\\n\\r\\nAcceleratorState().mixed_precision=\\'bf16\\'\\r\\naccelerator.mixed_precision=\\'bf16\\'\\r\\n```',\n", - " 'from_author': True}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/935'},\n", - " 1171706400: {'diff': 'diff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\\nindex dc8883425..4f9617711 100644\\n--- a/src/accelerate/utils/dataclasses.py\\n+++ b/src/accelerate/utils/dataclasses.py\\n@@ -23,6 +23,7 @@\\n import os\\n import typing\\n import warnings\\n+from contextlib import contextmanager\\n from dataclasses import dataclass, field\\n from datetime import timedelta\\n from distutils.util import strtobool\\n@@ -529,6 +530,23 @@ def set_deepspeed_weakref(self):\\n \\n self.dschf = HfDeepSpeedConfig(ds_config) # keep this object alive # noqa\\n \\n+ def is_zero3_init_enabled(self):\\n+ return self.zero3_init_flag\\n+\\n+ @contextmanager\\n+ def zero3_init_context_manager(self, enable=False):\\n+ old = self.zero3_init_flag\\n+ if old == enable:\\n+ yield\\n+ else:\\n+ self.zero3_init_flag = enable\\n+ self.dschf = None\\n+ self.set_deepspeed_weakref()\\n+ yield\\n+ self.zero3_init_flag = old\\n+ self.dschf = None\\n+ self.set_deepspeed_weakref()\\n+\\n \\n @dataclass\\n class FullyShardedDataParallelPlugin:\\n',\n", - " 'code_comments': [{'body': 'Might be clearer to have an `else` block here instead of the early `return`.',\n", - " 'diff_hunk': '@@ -529,6 +530,23 @@ def set_deepspeed_weakref(self):\\n \\n self.dschf = HfDeepSpeedConfig(ds_config) # keep this object 
alive # noqa\\n \\n+ def is_zero3_init_enabled(self):\\n+ return self.zero3_init_flag\\n+\\n+ @contextmanager\\n+ def set_zero3_init(self, enable=False):\\n+ old = self.zero3_init_flag\\n+ if old == enable:\\n+ yield\\n+ return\\n+ self.zero3_init_flag = enable\\n+ self.dschf = None\\n+ self.set_deepspeed_weakref()\\n+ yield\\n+ self.zero3_init_flag = old\\n+ self.dschf = None\\n+ self.set_deepspeed_weakref()',\n", - " 'from_author': False},\n", - " {'body': 'Done. ',\n", - " 'diff_hunk': '@@ -529,6 +530,23 @@ def set_deepspeed_weakref(self):\\n \\n self.dschf = HfDeepSpeedConfig(ds_config) # keep this object alive # noqa\\n \\n+ def is_zero3_init_enabled(self):\\n+ return self.zero3_init_flag\\n+\\n+ @contextmanager\\n+ def set_zero3_init(self, enable=False):\\n+ old = self.zero3_init_flag\\n+ if old == enable:\\n+ yield\\n+ return\\n+ self.zero3_init_flag = enable\\n+ self.dschf = None\\n+ self.set_deepspeed_weakref()\\n+ yield\\n+ self.zero3_init_flag = old\\n+ self.dschf = None\\n+ self.set_deepspeed_weakref()',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'here is how I used your new method:\\r\\n\\r\\n```\\r\\n from transformer.utils import ContextManagers\\r\\n deepspeed_plugin = get_deepspeed_plugin()\\r\\n if deepspeed_plugin is not None:\\r\\n zero_init_disabled_context = [deepspeed_plugin.set_zero3_init(enable=False)]\\r\\n else:\\r\\n zero_init_disabled_context = []\\r\\n\\r\\n with ContextManagers(zero_init_disabled_context):\\r\\n config = AutoConfig.from_pretrained(vision_model_name, **vision_model_params)\\r\\n model = AutoModel.from_config(config, torch_dtype=torch_dtype)\\r\\n```',\n", - " 'from_author': False},\n", - " {'body': '> Would it be more intuitive to call it `zero_init_context_manager`?\\r\\n\\r\\nrenamed it to `zero3_init_context_manager`',\n", - " 'from_author': True}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/932'},\n", - " 1168673450: {'diff': 'diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex fd5ad7d60..04ff21127 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -343,14 +343,14 @@ def launch_command_parser(subparsers=None):\\n )\\n deepspeed_args.add_argument(\\n \"--zero3_init_flag\",\\n- default=None,\\n+ default=\"true\",\\n type=str,\\n help=\"Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. \"\\n \"Only applicable with DeepSpeed ZeRO Stage-3.\",\\n )\\n deepspeed_args.add_argument(\\n \"--zero3_save_16bit_model\",\\n- default=None,\\n+ default=\"false\",\\n type=str,\\n help=\"Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. 
\"\\n \"Only applicable with DeepSpeed ZeRO Stage-3.\",\\n@@ -759,15 +759,15 @@ def deepspeed_launcher(args):\\n current_env[\"PYTHONPATH\"] = env_var_path_add(\"PYTHONPATH\", os.path.abspath(\".\"))\\n current_env[\"ACCELERATE_MIXED_PRECISION\"] = str(mixed_precision)\\n current_env[\"ACCELERATE_USE_DEEPSPEED\"] = \"true\"\\n- current_env[\"DEEPSPEED_ZERO_STAGE\"] = str(args.zero_stage)\\n- current_env[\"GRADIENT_ACCUMULATION_STEPS\"] = str(args.gradient_accumulation_steps)\\n- current_env[\"GRADIENT_CLIPPING\"] = str(args.gradient_clipping).lower()\\n- current_env[\"DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\"] = str(args.offload_optimizer_device).lower()\\n- current_env[\"DEEPSPEED_OFFLOAD_PARAM_DEVICE\"] = str(args.offload_param_device).lower()\\n- current_env[\"DEEPSPEED_ZERO3_INIT\"] = str(args.zero3_init_flag).lower()\\n- current_env[\"DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\"] = str(args.zero3_save_16bit_model).lower()\\n+ current_env[\"ACCELERATE_DEEPSPEED_ZERO_STAGE\"] = str(args.zero_stage)\\n+ current_env[\"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\"] = str(args.gradient_accumulation_steps)\\n+ current_env[\"ACCELERATE_GRADIENT_CLIPPING\"] = str(args.gradient_clipping).lower()\\n+ current_env[\"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\"] = str(args.offload_optimizer_device).lower()\\n+ current_env[\"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\"] = str(args.offload_param_device).lower()\\n+ current_env[\"ACCELERATE_DEEPSPEED_ZERO3_INIT\"] = str(args.zero3_init_flag).lower()\\n+ current_env[\"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\"] = str(args.zero3_save_16bit_model).lower()\\n if args.deepspeed_config_file is not None:\\n- current_env[\"DEEPSPEED_CONFIG_FILE\"] = str(args.deepspeed_config_file)\\n+ current_env[\"ACCELERATE_DEEPSPEED_CONFIG_FILE\"] = str(args.deepspeed_config_file)\\n \\n if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:\\n with open(\".deepspeed_env\", \"a\") as f:\\n@@ -1041,8 +1041,7 @@ def launch_command(args):\\n for name, attr in defaults.__dict__.items():\\n if isinstance(attr, dict):\\n for k in defaults.deepspeed_config:\\n- if getattr(args, k) is None:\\n- setattr(args, k, defaults.deepspeed_config[k])\\n+ setattr(args, k, defaults.deepspeed_config[k])\\n for k in defaults.fsdp_config:\\n arg_to_set = k\\n if \"fsdp\" not in arg_to_set:\\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\\nindex eafad8a09..eea8c3ab3 100644\\n--- a/src/accelerate/utils/dataclasses.py\\n+++ b/src/accelerate/utils/dataclasses.py\\n@@ -361,7 +361,7 @@ def __post_init__(self):\\n from .deepspeed import HfDeepSpeedConfig\\n \\n if self.hf_ds_config is None:\\n- self.hf_ds_config = os.environ.get(\"DEEPSPEED_CONFIG_FILE\", \"none\")\\n+ self.hf_ds_config = os.environ.get(\"ACCELERATE_DEEPSPEED_CONFIG_FILE\", \"none\")\\n if (\\n isinstance(self.hf_ds_config, dict)\\n or (isinstance(self.hf_ds_config, str) and self.hf_ds_config != \"none\")\\n@@ -377,24 +377,26 @@ def __post_init__(self):\\n raise ValueError(\"Please specify the ZeRO optimization config in the DeepSpeed config.\")\\n else:\\n if self.gradient_accumulation_steps is None:\\n- self.gradient_accumulation_steps = int(os.environ.get(\"GRADIENT_ACCUMULATION_STEPS\", 1))\\n+ self.gradient_accumulation_steps = int(os.environ.get(\"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\", 1))\\n \\n if self.gradient_clipping is None:\\n- gradient_clipping = os.environ.get(\"GRADIENT_CLIPPING\", \"none\")\\n+ gradient_clipping = 
os.environ.get(\"ACCELERATE_GRADIENT_CLIPPING\", \"none\")\\n if gradient_clipping != \"none\":\\n self.gradient_clipping = float(gradient_clipping)\\n \\n if self.zero_stage is None:\\n- self.zero_stage = int(os.environ.get(\"DEEPSPEED_ZERO_STAGE\", 2))\\n+ self.zero_stage = int(os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO_STAGE\", 2))\\n \\n if self.offload_optimizer_device is None:\\n- self.offload_optimizer_device = os.environ.get(\"DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\\n+ self.offload_optimizer_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\\n \\n if self.offload_param_device is None:\\n- self.offload_param_device = os.environ.get(\"DEEPSPEED_OFFLOAD_PARAM_DEVICE\", \"none\")\\n+ self.offload_param_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\", \"none\")\\n \\n if self.zero3_save_16bit_model is None:\\n- self.zero3_save_16bit_model = os.environ.get(\"DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\", \"false\") == \"true\"\\n+ self.zero3_save_16bit_model = (\\n+ os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\", \"false\") == \"true\"\\n+ )\\n \\n config = {\\n \"train_batch_size\": \"auto\",\\n@@ -417,7 +419,7 @@ def __post_init__(self):\\n self.deepspeed_config = self.hf_ds_config.config\\n self.deepspeed_config[\"steps_per_print\"] = float(\"inf\") # this will stop deepspeed from logging @ stdout\\n if self.zero3_init_flag is None:\\n- self.zero3_init_flag = os.environ.get(\"DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"\\n+ self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"\\n if self.zero3_init_flag and not self.hf_ds_config.is_zero3():\\n warnings.warn(\"DeepSpeed Zero3 Init flag is only applicable for ZeRO Stage 3. Setting it to False.\")\\n self.zero3_init_flag = False\\n',\n", - " 'code_comments': [{'body': 'this logic needs to be recoded since the default is no longer `None` and thus this code will never run.',\n", - " 'diff_hunk': '@@ -377,24 +377,26 @@ def __post_init__(self):\\n raise ValueError(\"Please specify the ZeRO optimization config in the DeepSpeed config.\")\\n else:\\n if self.gradient_accumulation_steps is None:\\n- self.gradient_accumulation_steps = int(os.environ.get(\"GRADIENT_ACCUMULATION_STEPS\", 1))\\n+ self.gradient_accumulation_steps = int(os.environ.get(\"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\", 1))\\n \\n if self.gradient_clipping is None:\\n- gradient_clipping = os.environ.get(\"GRADIENT_CLIPPING\", \"none\")\\n+ gradient_clipping = os.environ.get(\"ACCELERATE_GRADIENT_CLIPPING\", \"none\")\\n if gradient_clipping != \"none\":\\n self.gradient_clipping = float(gradient_clipping)\\n \\n if self.zero_stage is None:\\n- self.zero_stage = int(os.environ.get(\"DEEPSPEED_ZERO_STAGE\", 2))\\n+ self.zero_stage = int(os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO_STAGE\", 2))\\n \\n if self.offload_optimizer_device is None:\\n- self.offload_optimizer_device = os.environ.get(\"DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\\n+ self.offload_optimizer_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\\n \\n if self.offload_param_device is None:\\n- self.offload_param_device = os.environ.get(\"DEEPSPEED_OFFLOAD_PARAM_DEVICE\", \"none\")\\n+ self.offload_param_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\", \"none\")\\n \\n if self.zero3_save_16bit_model is None:\\n- self.zero3_save_16bit_model = os.environ.get(\"DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\", \"false\") == \"true\"\\n+ 
self.zero3_save_16bit_model = (\\n+ os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\", \"false\") == \"true\"\\n+ )',\n", - " 'from_author': False},\n", - " {'body': 'this logic needs to be recoded since the default is no longer `None` and thus this code will never run.',\n", - " 'diff_hunk': '@@ -417,7 +419,7 @@ def __post_init__(self):\\n self.deepspeed_config = self.hf_ds_config.config\\n self.deepspeed_config[\"steps_per_print\"] = float(\"inf\") # this will stop deepspeed from logging @ stdout\\n if self.zero3_init_flag is None:\\n- self.zero3_init_flag = os.environ.get(\"DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"\\n+ self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"',\n", - " 'from_author': False},\n", - " {'body': 'may be add to `help` that `ACCELERATE_DEEPSPEED_ZERO3_INIT` env var can override it?',\n", - " 'diff_hunk': '@@ -343,14 +343,14 @@ def launch_command_parser(subparsers=None):\\n )\\n deepspeed_args.add_argument(\\n \"--zero3_init_flag\",\\n- default=None,\\n+ default=\"true\",\\n type=str,\\n help=\"Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. \"\\n \"Only applicable with DeepSpeed ZeRO Stage-3.\",',\n", - " 'from_author': False},\n", - " {'body': 'may be add to `help` that `ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL` env var can override it?',\n", - " 'diff_hunk': '@@ -343,14 +343,14 @@ def launch_command_parser(subparsers=None):\\n )\\n deepspeed_args.add_argument(\\n \"--zero3_init_flag\",\\n- default=None,\\n+ default=\"true\",\\n type=str,\\n help=\"Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. \"\\n \"Only applicable with DeepSpeed ZeRO Stage-3.\",\\n )\\n deepspeed_args.add_argument(\\n \"--zero3_save_16bit_model\",\\n- default=None,\\n+ default=\"false\",\\n type=str,\\n help=\"Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. \"\\n \"Only applicable with DeepSpeed ZeRO Stage-3.\",',\n", - " 'from_author': False},\n", - " {'body': \"Hello, `ACCELERATE_DEEPSPEED_ZERO3_INIT` is set using `zero3_init_flag` arg, so I don't think explicit mention is required as it is only used internally.\",\n", - " 'diff_hunk': '@@ -343,14 +343,14 @@ def launch_command_parser(subparsers=None):\\n )\\n deepspeed_args.add_argument(\\n \"--zero3_init_flag\",\\n- default=None,\\n+ default=\"true\",\\n type=str,\\n help=\"Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. \"\\n \"Only applicable with DeepSpeed ZeRO Stage-3.\",',\n", - " 'from_author': True},\n", - " {'body': 'Same explanation as above',\n", - " 'diff_hunk': '@@ -343,14 +343,14 @@ def launch_command_parser(subparsers=None):\\n )\\n deepspeed_args.add_argument(\\n \"--zero3_init_flag\",\\n- default=None,\\n+ default=\"true\",\\n type=str,\\n help=\"Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. \"\\n \"Only applicable with DeepSpeed ZeRO Stage-3.\",\\n )\\n deepspeed_args.add_argument(\\n \"--zero3_save_16bit_model\",\\n- default=None,\\n+ default=\"false\",\\n type=str,\\n help=\"Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. 
\"\\n \"Only applicable with DeepSpeed ZeRO Stage-3.\",',\n", - " 'from_author': True},\n", - " {'body': 'ah, understood, thank you!',\n", - " 'diff_hunk': '@@ -343,14 +343,14 @@ def launch_command_parser(subparsers=None):\\n )\\n deepspeed_args.add_argument(\\n \"--zero3_init_flag\",\\n- default=None,\\n+ default=\"true\",\\n type=str,\\n help=\"Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. \"\\n \"Only applicable with DeepSpeed ZeRO Stage-3.\",',\n", - " 'from_author': False},\n", - " {'body': \"Hello, here dataclass object attributes are still `None` initially if the user hasn't passed them explicitly by directly creating `DeepSpeedPlugin` object. In `__post_init__` call, the env variables which were set via [default] cmd args are used for setting dataclass object attributes.\",\n", - " 'diff_hunk': '@@ -377,24 +377,26 @@ def __post_init__(self):\\n raise ValueError(\"Please specify the ZeRO optimization config in the DeepSpeed config.\")\\n else:\\n if self.gradient_accumulation_steps is None:\\n- self.gradient_accumulation_steps = int(os.environ.get(\"GRADIENT_ACCUMULATION_STEPS\", 1))\\n+ self.gradient_accumulation_steps = int(os.environ.get(\"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\", 1))\\n \\n if self.gradient_clipping is None:\\n- gradient_clipping = os.environ.get(\"GRADIENT_CLIPPING\", \"none\")\\n+ gradient_clipping = os.environ.get(\"ACCELERATE_GRADIENT_CLIPPING\", \"none\")\\n if gradient_clipping != \"none\":\\n self.gradient_clipping = float(gradient_clipping)\\n \\n if self.zero_stage is None:\\n- self.zero_stage = int(os.environ.get(\"DEEPSPEED_ZERO_STAGE\", 2))\\n+ self.zero_stage = int(os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO_STAGE\", 2))\\n \\n if self.offload_optimizer_device is None:\\n- self.offload_optimizer_device = os.environ.get(\"DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\\n+ self.offload_optimizer_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\\n \\n if self.offload_param_device is None:\\n- self.offload_param_device = os.environ.get(\"DEEPSPEED_OFFLOAD_PARAM_DEVICE\", \"none\")\\n+ self.offload_param_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\", \"none\")\\n \\n if self.zero3_save_16bit_model is None:\\n- self.zero3_save_16bit_model = os.environ.get(\"DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\", \"false\") == \"true\"\\n+ self.zero3_save_16bit_model = (\\n+ os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\", \"false\") == \"true\"\\n+ )',\n", - " 'from_author': True},\n", - " {'body': 'Same explanation as above',\n", - " 'diff_hunk': '@@ -417,7 +419,7 @@ def __post_init__(self):\\n self.deepspeed_config = self.hf_ds_config.config\\n self.deepspeed_config[\"steps_per_print\"] = float(\"inf\") # this will stop deepspeed from logging @ stdout\\n if self.zero3_init_flag is None:\\n- self.zero3_init_flag = os.environ.get(\"DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"\\n+ self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"',\n", - " 'from_author': True},\n", - " {'body': 'my bad then! 
my apologies for the false alert - thank you for explaining, @pacman100!',\n", - " 'diff_hunk': '@@ -377,24 +377,26 @@ def __post_init__(self):\\n raise ValueError(\"Please specify the ZeRO optimization config in the DeepSpeed config.\")\\n else:\\n if self.gradient_accumulation_steps is None:\\n- self.gradient_accumulation_steps = int(os.environ.get(\"GRADIENT_ACCUMULATION_STEPS\", 1))\\n+ self.gradient_accumulation_steps = int(os.environ.get(\"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\", 1))\\n \\n if self.gradient_clipping is None:\\n- gradient_clipping = os.environ.get(\"GRADIENT_CLIPPING\", \"none\")\\n+ gradient_clipping = os.environ.get(\"ACCELERATE_GRADIENT_CLIPPING\", \"none\")\\n if gradient_clipping != \"none\":\\n self.gradient_clipping = float(gradient_clipping)\\n \\n if self.zero_stage is None:\\n- self.zero_stage = int(os.environ.get(\"DEEPSPEED_ZERO_STAGE\", 2))\\n+ self.zero_stage = int(os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO_STAGE\", 2))\\n \\n if self.offload_optimizer_device is None:\\n- self.offload_optimizer_device = os.environ.get(\"DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\\n+ self.offload_optimizer_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\\n \\n if self.offload_param_device is None:\\n- self.offload_param_device = os.environ.get(\"DEEPSPEED_OFFLOAD_PARAM_DEVICE\", \"none\")\\n+ self.offload_param_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\", \"none\")\\n \\n if self.zero3_save_16bit_model is None:\\n- self.zero3_save_16bit_model = os.environ.get(\"DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\", \"false\") == \"true\"\\n+ self.zero3_save_16bit_model = (\\n+ os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\", \"false\") == \"true\"\\n+ )',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/928'},\n", - " 1168181974: {'diff': 'diff --git a/src/accelerate/__init__.py b/src/accelerate/__init__.py\\nindex d22006d54..9129fc1a5 100644\\n--- a/src/accelerate/__init__.py\\n+++ b/src/accelerate/__init__.py\\n@@ -5,7 +5,14 @@\\n __version__ = \"0.15.0.dev0\"\\n \\n from .accelerator import Accelerator\\n-from .big_modeling import cpu_offload, disk_offload, dispatch_model, init_empty_weights, load_checkpoint_and_dispatch\\n+from .big_modeling import (\\n+ cpu_offload,\\n+ disk_offload,\\n+ dispatch_model,\\n+ init_empty_weights,\\n+ init_on_device,\\n+ load_checkpoint_and_dispatch,\\n+)\\n from .launchers import debug_launcher, notebook_launcher\\n from .utils import (\\n DeepSpeedPlugin,\\ndiff --git a/src/accelerate/big_modeling.py b/src/accelerate/big_modeling.py\\nindex 24857ad25..d3247a464 100644\\n--- a/src/accelerate/big_modeling.py\\n+++ b/src/accelerate/big_modeling.py\\n@@ -62,6 +62,31 @@ def init_empty_weights(include_buffers: bool = False):\\n \"\"\"\\n if not is_torch_version(\">=\", \"1.9.0\"):\\n raise NotImplementedError(\"Initializing empty weights to a meta device requires torch >= 1.9.0\")\\n+ with init_on_device(torch.device(\"meta\"), include_buffers=include_buffers) as f:\\n+ yield f\\n+\\n+\\n+@contextmanager\\n+def init_on_device(device: torch.device, include_buffers: bool = False):\\n+ \"\"\"\\n+ A context manager under which models are initialized with all parameters on the specified device.\\n+\\n+ Args:\\n+ device (`torch.device`):\\n+ Device to initialize all parameters on.\\n+ 
include_buffers (`bool`, *optional*, defaults to `False`):\\n+ Whether or not to also put all buffers on the meta device while initializing.\\n+\\n+ Example:\\n+\\n+ ```python\\n+ import torch.nn as nn\\n+ from accelerate import init_on_device\\n+\\n+ with init_on_device(device=torch.device(\"cuda\")):\\n+ tst = nn.Liner(100, 100) # on `cuda` device\\n+ ```\\n+ \"\"\"\\n old_register_parameter = nn.Module.register_parameter\\n if include_buffers:\\n old_register_buffer = nn.Module.register_buffer\\n@@ -71,12 +96,12 @@ def register_empty_parameter(module, name, param):\\n if param is not None:\\n param_cls = type(module._parameters[name])\\n kwargs = module._parameters[name].__dict__\\n- module._parameters[name] = param_cls(module._parameters[name].to(torch.device(\"meta\")), **kwargs)\\n+ module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)\\n \\n def register_empty_buffer(module, name, buffer):\\n old_register_buffer(module, name, buffer)\\n if buffer is not None:\\n- module._buffers[name] = module._buffers[name].to(torch.device(\"meta\"))\\n+ module._buffers[name] = module._buffers[name].to(device)\\n \\n # Patch tensor creation\\n if include_buffers:\\n@@ -89,7 +114,7 @@ def register_empty_buffer(module, name, buffer):\\n \\n def patch_tensor_constructor(fn):\\n def wrapper(*args, **kwargs):\\n- kwargs[\"device\"] = torch.device(\"meta\")\\n+ kwargs[\"device\"] = device\\n return fn(*args, **kwargs)\\n \\n return wrapper\\ndiff --git a/src/accelerate/test_utils/__init__.py b/src/accelerate/test_utils/__init__.py\\nindex faf8a7da8..b58b932b1 100644\\n--- a/src/accelerate/test_utils/__init__.py\\n+++ b/src/accelerate/test_utils/__init__.py\\n@@ -8,6 +8,7 @@\\n require_cpu,\\n require_cuda,\\n require_huggingface_suite,\\n+ require_mps,\\n require_multi_gpu,\\n require_single_gpu,\\n require_torch_min_version,\\ndiff --git a/src/accelerate/test_utils/testing.py b/src/accelerate/test_utils/testing.py\\nindex 94e13412a..3a681bf32 100644\\n--- a/src/accelerate/test_utils/testing.py\\n+++ b/src/accelerate/test_utils/testing.py\\n@@ -87,6 +87,15 @@ def require_cuda(test_case):\\n return unittest.skipUnless(torch.cuda.is_available(), \"test requires a GPU\")(test_case)\\n \\n \\n+def require_mps(test_case):\\n+ \"\"\"\\n+ Decorator marking a test that requires MPS backend. These tests are skipped when torch doesn\\'t support `mps`\\n+ backend.\\n+ \"\"\"\\n+ is_mps_supported = hasattr(torch.backends, \"mps\") and torch.backends.mps.is_available()\\n+ return unittest.skipUnless(is_mps_supported, \"test requires a `mps` backend support in `torch`\")(test_case)\\n+\\n+\\n def require_huggingface_suite(test_case):\\n \"\"\"\\n Decorator marking a test that requires transformers and datasets. 
These tests are skipped when they are not.\\ndiff --git a/tests/test_big_modeling.py b/tests/test_big_modeling.py\\nindex 4f738f4d8..467a91424 100644\\n--- a/tests/test_big_modeling.py\\n+++ b/tests/test_big_modeling.py\\n@@ -24,10 +24,11 @@\\n disk_offload,\\n dispatch_model,\\n init_empty_weights,\\n+ init_on_device,\\n load_checkpoint_and_dispatch,\\n )\\n from accelerate.hooks import remove_hook_from_submodules\\n-from accelerate.test_utils import require_cuda, require_multi_gpu, require_torch_min_version, slow\\n+from accelerate.test_utils import require_cuda, require_mps, require_multi_gpu, require_torch_min_version, slow\\n from accelerate.utils import offload_state_dict\\n from transformers import AutoModelForCausalLM, AutoTokenizer\\n \\n@@ -109,6 +110,22 @@ def test_init_empty_weights_very_large_model(self):\\n with init_empty_weights():\\n _ = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\\n \\n+ @require_cuda\\n+ def test_init_on_device_cuda(self):\\n+ device = torch.device(\"cuda:0\")\\n+ with init_on_device(device):\\n+ model = nn.Linear(10, 10)\\n+ self.assertEqual(model.weight.device, device)\\n+ self.assertEqual(model.weight.device, device)\\n+\\n+ @require_mps\\n+ def test_init_on_device_mps(self):\\n+ device = torch.device(\"mps:0\")\\n+ with init_on_device(device):\\n+ model = nn.Linear(10, 10)\\n+ self.assertEqual(model.weight.device, device)\\n+ self.assertEqual(model.weight.device, device)\\n+\\n def test_cpu_offload(self):\\n model = ModelForTest()\\n x = torch.randn(2, 3)\\n',\n", - " 'code_comments': [{'body': 'Ah ah, that example needs adapting ;-p\\r\\n',\n", - " 'diff_hunk': '@@ -62,6 +62,32 @@ def init_empty_weights(include_buffers: bool = False):\\n \"\"\"\\n if not is_torch_version(\">=\", \"1.9.0\"):\\n raise NotImplementedError(\"Initializing empty weights to a meta device requires torch >= 1.9.0\")\\n+ with init_on_device(torch.device(\"meta\"), include_buffers=include_buffers) as f:\\n+ yield f\\n+\\n+\\n+@contextmanager\\n+def init_on_device(device: torch.device, include_buffers: bool = False):\\n+ \"\"\"\\n+ A context manager under which models are initialized with all parameters on the specified device.\\n+\\n+ Args:\\n+ device (`torch.device`):\\n+ Device to initialize all parameters on.\\n+ include_buffers (`bool`, *optional*, defaults to `False`):\\n+ Whether or not to also put all buffers on the meta device while initializing.\\n+\\n+ Example:\\n+\\n+ ```python\\n+ import torch.nn as nn\\n+ from accelerate import init_empty_weights\\n+\\n+ # Initialize a model with 100 billions parameters in no time and without using any RAM.\\n+ with init_on_device(device=torch.device(\"cuda\")):\\n+ tst = nn.Liner(100, 100) # on `cuda` device',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'I guess we can also extend to specific dtypes as well now.',\n", - " 'from_author': True},\n", - " {'body': 'Which dtype did you have in mind? For floating types there is a default you can set in PyTorch directly.',\n", - " 'from_author': False},\n", - " {'body': 'You mean https://pytorch.org/docs/stable/generated/torch.set_default_dtype.html ? Does fp16/bf16 also work? If so my bad.',\n", - " 'from_author': True},\n", - " {'body': 'Yes it works pretty well. 
We use it in Transformers [here](https://github.com/huggingface/transformers/blob/17292440c069118fbdb992b9a17da2098fab5b87/src/transformers/modeling_utils.py#L1109) for instance.',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/926'},\n", - " 1165154155: {'diff': 'diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml\\nindex 6b9825523..ccaafbc8e 100644\\n--- a/docs/source/_toctree.yml\\n+++ b/docs/source/_toctree.yml\\n@@ -28,7 +28,7 @@\\n - local: usage_guides/tracking\\n title: Using experiment trackers\\n - local: usage_guides/big_modeling\\n- title: How to use large models with small resources\\n+ title: How perform inference on large models with small resources\\n - local: usage_guides/memory\\n title: How to avoid CUDA Out-of-Memory\\n - local: usage_guides/sagemaker\\ndiff --git a/docs/source/usage_guides/big_modeling.mdx b/docs/source/usage_guides/big_modeling.mdx\\nindex 1e13849c6..d7523a03b 100644\\n--- a/docs/source/usage_guides/big_modeling.mdx\\n+++ b/docs/source/usage_guides/big_modeling.mdx\\n@@ -10,7 +10,7 @@ an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o\\n specific language governing permissions and limitations under the License.\\n -->\\n \\n-# Handling big models\\n+# Handling big models for inference\\n \\n When loading a pretrained model in PyTorch, the usual workflow looks like this:\\n \\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/921'},\n", - " 1163744045: {'diff': 'diff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\\nindex ff3e95ead..2774b18ce 100644\\n--- a/src/accelerate/utils/modeling.py\\n+++ b/src/accelerate/utils/modeling.py\\n@@ -85,7 +85,11 @@ def dtype_byte_size(dtype: torch.dtype):\\n \\n \\n def set_module_tensor_to_device(\\n- module: nn.Module, tensor_name: str, device: Union[int, str, torch.device], value: Optional[torch.Tensor] = None\\n+ module: nn.Module,\\n+ tensor_name: str,\\n+ device: Union[int, str, torch.device],\\n+ value: Optional[torch.Tensor] = None,\\n+ dtype: Optional[Union[str, torch.dtype]] = None,\\n ):\\n \"\"\"\\n A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing\\n@@ -97,6 +101,9 @@ def set_module_tensor_to_device(\\n device (`int`, `str` or `torch.device`): The device on which to set the tensor.\\n value (`torch.Tensor`, *optional*): The value of the tensor (useful when going from the meta device to any\\n other device).\\n+ dtype (`torch.dtype`, *optional*):\\n+ If passed along the value of the parameter will be cast to this `dtype`. 
Otherwise, `value` will be cast to\\n+ the dtype of the existing parameter in the model.\\n \"\"\"\\n # Recurse if needed\\n if \".\" in tensor_name:\\n@@ -116,6 +123,13 @@ def set_module_tensor_to_device(\\n if old_value.device == torch.device(\"meta\") and device not in [\"meta\", torch.device(\"meta\")] and value is None:\\n raise ValueError(f\"{tensor_name} is on the meta device, we need a `value` to put in on {device}.\")\\n \\n+ if value is not None:\\n+ if dtype is None:\\n+ # For compatibility with PyTorch load_state_dict which converts state dict dtype to existing dtype in model\\n+ value = value.to(old_value.dtype)\\n+ elif str(value.dtype).startswith((\"torch.uint\", \"torch.int\", \"torch.bool\")):\\n+ value = value.to(dtype)\\n+\\n with torch.no_grad():\\n if value is None:\\n new_value = old_value.to(device)\\n@@ -680,8 +694,7 @@ def load_checkpoint_in_model(\\n else:\\n for param_name, param in checkpoint.items():\\n module_name = param_name\\n- if dtype is not None and not str(param.dtype).startswith((\"torch.uint\", \"torch.int\", \"torch.bool\")):\\n- param = param.to(dtype)\\n+\\n while len(module_name) > 0 and module_name not in device_map:\\n module_name = \".\".join(module_name.split(\".\")[:-1])\\n if module_name == \"\" and \"\" not in device_map:\\n@@ -696,7 +709,7 @@ def load_checkpoint_in_model(\\n set_module_tensor_to_device(model, param_name, \"meta\")\\n offload_weight(param, param_name, state_dict_folder, index=state_dict_index)\\n else:\\n- set_module_tensor_to_device(model, param_name, param_device, value=param)\\n+ set_module_tensor_to_device(model, param_name, param_device, value=param, dtype=dtype)\\n \\n # Force Python to clean up.\\n del checkpoint\\n',\n", - " 'code_comments': [{'body': 'should this not be better done in ` set_module_tensor_to_device` ? Or maybe additionally add a `torch_dtype` arg to `set_module_tensor_to_device` that handles the param correctly if `value=param` is used? ',\n", - " 'diff_hunk': '@@ -680,8 +680,23 @@ def load_checkpoint_in_model(\\n else:\\n for param_name, param in checkpoint.items():\\n module_name = param_name\\n+\\n if dtype is not None and not str(param.dtype).startswith((\"torch.uint\", \"torch.int\", \"torch.bool\")):\\n param = param.to(dtype)\\n+\\n+ # For compatibility with PyTorch load_state_dict which converts state dict dtype to existing dtype in\\n+ # model\\n+ if dtype is None:\\n+ old_param = model\\n+ splits = param_name.split(\".\")\\n+ for split in splits:\\n+ old_param = getattr(old_param, split)\\n+ if old_param is None:\\n+ break\\n+\\n+ if old_param is not None:\\n+ param = param.to(old_param.dtype)',\n", - " 'from_author': False},\n", - " {'body': 'This is moved to `set_module_tensor_to_device`.',\n", - " 'diff_hunk': '@@ -680,8 +694,7 @@ def load_checkpoint_in_model(\\n else:\\n for param_name, param in checkpoint.items():\\n module_name = param_name\\n- if dtype is not None and not str(param.dtype).startswith((\"torch.uint\", \"torch.int\", \"torch.bool\")):',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'Actually before merging, could it maybe be better to handle this in `set_module_tensor_to_device` ? E.g. add a `dtype` argument to the function there? 
This would be easier for `diffusers` to by in line with `accelerate` I think - see: https://github.com/huggingface/diffusers/blob/727434c206f6c22b746e460293035a1324f0bc13/src/diffusers/modeling_utils.py#L491',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/920'},\n", - " 1161586415: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex 1c3782067..7aaee2de7 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -196,7 +196,6 @@ def __init__(\\n self,\\n device_placement: bool = True,\\n split_batches: bool = False,\\n- fp16: bool = None,\\n mixed_precision: Union[PrecisionType, str] = None,\\n gradient_accumulation_steps: int = 1,\\n cpu: bool = False,\\n@@ -220,13 +219,6 @@ def __init__(\\n f\"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}\"\\n )\\n \\n- if fp16:\\n- warnings.warn(\\n- \"`fp16=True` is deprecated and will be removed in version 0.15.0 of 🤗 Accelerate. Use `mixed_precision=\\'fp16\\'` instead.\",\\n- FutureWarning,\\n- )\\n- mixed_precision = \"fp16\"\\n-\\n if dynamo_backend is not None:\\n dynamo_backend = DynamoBackend(dynamo_backend.upper())\\n \\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex fd5ad7d60..afb6498f3 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -20,7 +20,6 @@\\n import os\\n import subprocess\\n import sys\\n-import warnings\\n from ast import literal_eval\\n from pathlib import Path\\n from typing import Dict, List\\n@@ -64,7 +63,6 @@\\n \"--multi-gpu\": \"Distributed GPUs\",\\n \"--tpu\": \"TPU\",\\n \"--mps\": \"MPS\",\\n- \"--use_mps_device\": \"MPS\",\\n \"--use_deepspeed\": \"DeepSpeed Arguments\",\\n \"--use_fsdp\": \"FSDP Arguments\",\\n \"--use_megatron_lm\": \"Megatron-LM Arguments\",\\n@@ -165,12 +163,6 @@ def launch_command_parser(subparsers=None):\\n hardware_args.add_argument(\\n \"--tpu\", default=False, action=\"store_true\", help=\"Whether or not this should launch a TPU training.\"\\n )\\n- hardware_args.add_argument(\\n- \"--use_mps_device\",\\n- default=False,\\n- action=\"store_true\",\\n- help=\"This argument is deprecated, use `--mps` instead.\",\\n- )\\n \\n # Resource selection arguments\\n resource_args = parser.add_argument_group(\\n@@ -191,12 +183,6 @@ def launch_command_parser(subparsers=None):\\n \"Choose between FP16 and BF16 (bfloat16) training. \"\\n \"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\",\\n )\\n- resource_args.add_argument(\\n- \"--fp16\",\\n- default=False,\\n- action=\"store_true\",\\n- help=\"This argument is deprecated, use `--mixed_precision fp16` instead.\",\\n- )\\n resource_args.add_argument(\\n \"--num_processes\", type=int, default=None, help=\"The total number of processes to be launched in parallel.\"\\n )\\n@@ -525,12 +511,6 @@ def simple_launcher(args):\\n \\n current_env = os.environ.copy()\\n current_env[\"ACCELERATE_USE_CPU\"] = str(args.cpu or args.use_cpu)\\n- if args.use_mps_device:\\n- warnings.warn(\\n- \\'`use_mps_device` flag is deprecated and will be removed in version 0.15.0 of 🤗 Accelerate. 
Use \"--mps\" instead.\\',\\n- FutureWarning,\\n- )\\n- args.mps = True\\n current_env[\"ACCELERATE_USE_MPS_DEVICE\"] = str(args.mps)\\n if args.mps:\\n current_env[\"PYTORCH_ENABLE_MPS_FALLBACK\"] = \"1\"\\n@@ -550,13 +530,6 @@ def simple_launcher(args):\\n f\"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.\"\\n )\\n \\n- if args.fp16:\\n- warnings.warn(\\n- \"`fp16` is deprecated and will be removed in version 0.15.0 of 🤗 Accelerate. Use `mixed_precision fp16` instead.\",\\n- FutureWarning,\\n- )\\n- mixed_precision = \"fp16\"\\n-\\n current_env[\"ACCELERATE_MIXED_PRECISION\"] = str(mixed_precision)\\n \\n try:\\n@@ -614,13 +587,6 @@ def multi_gpu_launcher(args):\\n except ValueError:\\n raise ValueError(f\"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.\")\\n \\n- if args.fp16:\\n- warnings.warn(\\n- \"`fp16` is deprecated and will be removed in version 0.15.0 of 🤗 Accelerate. Use `mixed_precision fp16` instead.\",\\n- FutureWarning,\\n- )\\n- mixed_precision = \"fp16\"\\n-\\n current_env[\"ACCELERATE_MIXED_PRECISION\"] = str(mixed_precision)\\n \\n try:\\n@@ -749,13 +715,6 @@ def deepspeed_launcher(args):\\n f\"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.\"\\n )\\n \\n- if args.fp16:\\n- warnings.warn(\\n- \\'--fp16 flag is deprecated and will be removed in version 0.15.0 of 🤗 Accelerate. Use \"--mixed_precision fp16\" instead.\\',\\n- FutureWarning,\\n- )\\n- mixed_precision = \"fp16\"\\n-\\n current_env[\"PYTHONPATH\"] = env_var_path_add(\"PYTHONPATH\", os.path.abspath(\".\"))\\n current_env[\"ACCELERATE_MIXED_PRECISION\"] = str(mixed_precision)\\n current_env[\"ACCELERATE_USE_DEEPSPEED\"] = \"true\"\\n@@ -925,10 +884,6 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\\n f\"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.\"\\n )\\n \\n- if args.fp16:\\n- warnings.warn(\\'--fp16 flag is deprecated. 
Use \"--mixed_precision fp16\" instead.\\', FutureWarning)\\n- mixed_precision = \"fp16\"\\n-\\n try:\\n dynamo_backend = DynamoBackend(args.dynamo_backend.upper())\\n except ValueError:\\n@@ -1054,15 +1009,12 @@ def launch_command(args):\\n \\n # Those args are handled separately\\n if (\\n- name not in [\"compute_environment\", \"fp16\", \"mixed_precision\", \"distributed_type\"]\\n+ name not in [\"compute_environment\", \"mixed_precision\", \"distributed_type\"]\\n and getattr(args, name, None) is None\\n ):\\n setattr(args, name, attr)\\n if not args.mixed_precision:\\n- if args.fp16:\\n- args.mixed_precision = \"fp16\"\\n- else:\\n- args.mixed_precision = defaults.mixed_precision\\n+ args.mixed_precision = defaults.mixed_precision\\n if args.dynamo_backend is None:\\n warned.append(\"\\\\t`--dynamo_backend` was set to a value of `\\'no\\'`\")\\n args.dynamo_backend = \"no\"\\ndiff --git a/src/accelerate/launchers.py b/src/accelerate/launchers.py\\nindex 5768ec06b..2c9db6a75 100644\\n--- a/src/accelerate/launchers.py\\n+++ b/src/accelerate/launchers.py\\n@@ -15,7 +15,6 @@\\n import os\\n import sys\\n import tempfile\\n-import warnings\\n \\n import torch\\n \\n@@ -23,7 +22,7 @@\\n from .utils import PrecisionType, PrepareForLaunch, patch_environment\\n \\n \\n-def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mixed_precision=\"no\", use_port=\"29500\"):\\n+def notebook_launcher(function, args=(), num_processes=None, mixed_precision=\"no\", use_port=\"29500\"):\\n \"\"\"\\n Launches a training function, using several processes if it\\'s possible in the current environment (TPU with\\n multiple cores for instance).\\n@@ -104,13 +103,6 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mix\\n \"function.\"\\n )\\n \\n- if use_fp16:\\n- warnings.warn(\\n- \"`fp16=True` is deprecated and will be removed in version 0.15.0 of 🤗 Accelerate. Use `mixed_precision=\\'fp16\\'` instead.\",\\n- FutureWarning,\\n- )\\n- mixed_precision = \"fp16\"\\n-\\n # torch.distributed will expect a few environment variable to be here. 
We set the ones common to each\\n # process here (the other ones will be set be the launcher).\\n with patch_environment(\\ndiff --git a/src/accelerate/test_utils/scripts/test_script.py b/src/accelerate/test_utils/scripts/test_script.py\\nindex 6897d9084..ddb053a8a 100644\\n--- a/src/accelerate/test_utils/scripts/test_script.py\\n+++ b/src/accelerate/test_utils/scripts/test_script.py\\n@@ -275,29 +275,6 @@ def training_check():\\n assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\\n assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\\n \\n- # TEST that previous fp16 flag still works\\n- print(\"Legacy FP16 training check.\")\\n- AcceleratorState._reset_state()\\n- accelerator = Accelerator(fp16=True)\\n- train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\\n- model = RegressionModel()\\n- optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\\n-\\n- train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)\\n- set_seed(42)\\n- generator.manual_seed(42)\\n- for _ in range(3):\\n- for batch in train_dl:\\n- model.zero_grad()\\n- output = model(batch[\"x\"])\\n- loss = torch.nn.functional.mse_loss(output, batch[\"y\"])\\n- accelerator.backward(loss)\\n- optimizer.step()\\n-\\n- model = accelerator.unwrap_model(model).cpu()\\n- assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\\n- assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\\n-\\n # BF16 support is only for CPU + TPU, and some GPU\\n if is_bf16_available():\\n # Mostly a test that BF16 doesn\\'t crash as the operation inside the model is not converted to BF16\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/917'},\n", - " 1161333599: {'diff': 'diff --git a/docs/source/package_reference/utilities.mdx b/docs/source/package_reference/utilities.mdx\\nindex 3c1a1065e..9ebb60d34 100644\\n--- a/docs/source/package_reference/utilities.mdx\\n+++ b/docs/source/package_reference/utilities.mdx\\n@@ -24,6 +24,8 @@ These are basic dataclasses used throughout 🤗 Accelerate and they can be pass\\n \\n [[autodoc]] utils.PrecisionType\\n \\n+[[autodoc]] utils.ProjectConfiguration\\n+\\n ## Data Manipulation and Operations\\n \\n These include data operations that mimic the same `torch` ops but can be used on distributed processes.\\ndiff --git a/docs/source/quicktour.mdx b/docs/source/quicktour.mdx\\nindex 6d92e7958..6ea99f9bd 100644\\n--- a/docs/source/quicktour.mdx\\n+++ b/docs/source/quicktour.mdx\\n@@ -370,7 +370,11 @@ Note that since all the model parameters are references to tensors, this will lo\\n ## Saving/loading entire states\\n \\n When training your model, you may want to save the current state of the model, optimizer, random generators, and potentially LR schedulers to be restored in the _same script_.\\n-You can use [`~Accelerator.save_state`] and [`~Accelerator.load_state`] respectively to do so, just by simply passing in a save location. 
\\n+You can use [`~Accelerator.save_state`] and [`~Accelerator.load_state`] respectively to do so.\\n+\\n+To further customize where and how states saved through [`~Accelerator.save_state`] the [`~utils.ProjectConfiguration`] class can be used. For example \\n+if `automatic_checkpoint_naming` is enabled each saved checkpoint will be located then at `Accelerator.project_dir/checkpoints/checkpoint_{checkpoint_number}`.\\n+\\n If you have registered any other stateful items to be stored through [`~Accelerator.register_for_checkpointing`] they will also be saved and/or loaded.\\n \\n \\ndiff --git a/docs/source/usage_guides/checkpoint.mdx b/docs/source/usage_guides/checkpoint.mdx\\nindex f2684971b..354549205 100644\\n--- a/docs/source/usage_guides/checkpoint.mdx\\n+++ b/docs/source/usage_guides/checkpoint.mdx\\n@@ -17,6 +17,9 @@ saving and loading the model, optimizer, RNG generators, and the GradScaler. Ins\\n - Use [`~Accelerator.save_state`] for saving everything mentioned above to a folder location\\n - Use [`~Accelerator.load_state`] for loading everything stored from an earlier `save_state`\\n \\n+To further customize where and how states saved through [`~Accelerator.save_state`] the [`~utils.ProjectConfiguration`] class can be used. For example \\n+if `automatic_checkpoint_naming` is enabled each saved checkpoint will be located then at `Accelerator.project_dir/checkpoints/checkpoint_{checkpoint_number}`.\\n+\\n It should be noted that the expectation is that those states come from the same training script, they should not be from two separate scripts.\\n \\n - By using [`~Accelerator.register_for_checkpointing`], you can register custom objects to be automatically stored or loaded from the two prior functions,\\n@@ -28,7 +31,7 @@ Below is a brief example using checkpointing to save and reload a state during t\\n from accelerate import Accelerator\\n import torch\\n \\n-accelerator = Accelerator()\\n+accelerator = Accelerator(project_dir=\"my/save/path\")\\n \\n my_scheduler = torch.optim.lr_scheduler.StepLR(my_optimizer, step_size=1, gamma=0.99)\\n my_model, my_optimizer, my_training_dataloader = accelerator.prepare(my_model, my_optimizer, my_training_dataloader)\\n@@ -37,7 +40,7 @@ my_model, my_optimizer, my_training_dataloader = accelerator.prepare(my_model, m\\n accelerator.register_for_checkpointing(my_scheduler)\\n \\n # Save the starting state\\n-accelerator.save_state(\"my/save/path\")\\n+accelerator.save_state()\\n \\n device = accelerator.device\\n my_model.to(device)\\n@@ -56,5 +59,5 @@ for epoch in range(num_epochs):\\n my_scheduler.step()\\n \\n # Restore previous state\\n-accelerator.load_state(\"my/save/path\")\\n+accelerator.load_state(\"my/save/path/checkpointing/checkpoint_0\")\\n ```\\ndiff --git a/docs/source/usage_guides/tracking.mdx b/docs/source/usage_guides/tracking.mdx\\nindex b22a4dd14..3ff40a4a4 100644\\n--- a/docs/source/usage_guides/tracking.mdx\\n+++ b/docs/source/usage_guides/tracking.mdx\\n@@ -83,6 +83,12 @@ for iteration in config[\"num_iterations\"]:\\n accelerator.end_training()\\n ```\\n \\n+If a tracker requires a directory to save data to such as `TensorBoard` then a `logging_dir` or `project_dir` can be passed in. 
`project_dir` is useful \\n+if there are other further configurations such as those which can be combined with the [`~utils.ProjectConfiguration`] dataclass.\\n+\\n+```python\\n+accelerator = Accelerator(log_with=\"tensorboard\", logging_dir=\".\")\\n+```\\n \\n ## Implementing Custom Trackers\\n \\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex 1c3782067..151d1db35 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -16,6 +16,7 @@\\n import gc\\n import math\\n import os\\n+import shutil\\n import sys\\n import warnings\\n from contextlib import contextmanager\\n@@ -44,6 +45,7 @@\\n LoggerType,\\n MegatronLMPlugin,\\n PrecisionType,\\n+ ProjectConfiguration,\\n RNGType,\\n compare_versions,\\n convert_outputs_to_fp32,\\n@@ -157,8 +159,11 @@ class Accelerator:\\n - `\"comet_ml\"`\\n If `\"all\"` is selected, will pick up all available trackers in the environment and initialize them. Can\\n also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `\"all\"`.\\n- logging_dir (`str`, `os.PathLike`, *optional*):\\n- A path to a directory for storing logs of locally-compatible loggers.\\n+ project_config (`ProjectConfiguration`, *optional*):\\n+ A configuration for how saving the state can be handled.\\n+ project_dir (`str`, `os.PathLike`, *optional*):\\n+ A path to a directory for storing data such as logs of locally-compatible loggers and potentially saved\\n+ checkpoints.\\n dispatch_batches (`bool`, *optional*):\\n If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process\\n and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose\\n@@ -205,6 +210,8 @@ def __init__(\\n megatron_lm_plugin: MegatronLMPlugin = None,\\n rng_types: Optional[List[Union[str, RNGType]]] = None,\\n log_with: Optional[List[Union[str, LoggerType, GeneralTracker]]] = None,\\n+ project_dir: Optional[Union[str, os.PathLike]] = None,\\n+ project_config: Optional[ProjectConfiguration] = None,\\n logging_dir: Optional[Union[str, os.PathLike]] = None,\\n dispatch_batches: Optional[bool] = None,\\n even_batches: bool = True,\\n@@ -212,7 +219,19 @@ def __init__(\\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\\n dynamo_backend: Union[DynamoBackend, str] = None,\\n ):\\n- self.logging_dir = logging_dir\\n+ if project_config is not None:\\n+ self.project_configuration = project_config\\n+ else:\\n+ self.project_configuration = ProjectConfiguration(project_dir=project_dir)\\n+\\n+ if logging_dir is not None:\\n+ warnings.warn(\\n+ \"`logging_dir` is deprecated and will be removed in version 0.18.0 of 🤗 Accelerate. 
Use `project_dir` instead.\",\\n+ FutureWarning,\\n+ )\\n+ self.project_configuration.logging_dir = logging_dir\\n+ if project_dir is not None and self.project_dir is None:\\n+ self.project_configuration.project_dir = project_dir\\n if mixed_precision is not None:\\n mixed_precision = str(mixed_precision)\\n if mixed_precision not in PrecisionType:\\n@@ -420,6 +439,18 @@ def local_process_index(self):\\n def device(self):\\n return self.state.device\\n \\n+ @property\\n+ def project_dir(self):\\n+ return self.project_configuration.project_dir\\n+\\n+ @property\\n+ def logging_dir(self):\\n+ return self.project_configuration.logging_dir\\n+\\n+ @property\\n+ def save_iteration(self):\\n+ return self.project_configuration.iteration\\n+\\n @property\\n def is_main_process(self):\\n \"\"\"True for one process only.\"\"\"\\n@@ -1596,9 +1627,16 @@ def save(self, obj, f):\\n \"\"\"\\n save(obj, f)\\n \\n- def save_state(self, output_dir: str):\\n+ def save_state(self, output_dir: str = None):\\n \"\"\"\\n- Saves the current states of the model, optimizer, scaler, RNG generators, and registered objects.\\n+ Saves the current states of the model, optimizer, scaler, RNG generators, and registered objects to a folder.\\n+\\n+ If a `ProjectConfiguration` was passed to the `Accelerator` object with `automatic_checkpoint_naming` enabled\\n+ then checkpoints will be saved to `self.project_dir/checkpoints`. If the number of current saves is greater\\n+ than `total_limit` then the oldest save is deleted. Each checkpoint is saved in seperate folders named\\n+ `checkpoint_`.\\n+\\n+ Otherwise they are just saved to `output_dir`.\\n \\n \\n \\n@@ -1611,8 +1649,25 @@ def save_state(self, output_dir: str):\\n output_dir (`str` or `os.PathLike`):\\n The name of the folder to save all relevant weights and states.\\n \"\"\"\\n- # Check if folder exists\\n- output_dir = os.path.expanduser(output_dir)\\n+ if self.project_configuration.automatic_checkpoint_naming:\\n+ output_dir = os.path.join(self.project_dir, \"checkpoints\")\\n+ os.makedirs(output_dir, exist_ok=True)\\n+ if self.project_configuration.automatic_checkpoint_naming:\\n+ folders = [os.path.join(output_dir, folder) for folder in os.listdir(output_dir)]\\n+ if self.project_configuration.total_limit is not None and (\\n+ len(folders) + 1 > self.project_configuration.total_limit\\n+ ):\\n+ folders.sort()\\n+ logger.warning(\\n+ f\"Deleting {len(folders) + 1 - self.project_configuration.total_limit} checkpoints to make room for new checkpoint.\"\\n+ )\\n+ for folder in folders[: len(folders) + 1 - self.project_configuration.total_limit]:\\n+ shutil.rmtree(folder)\\n+ output_dir = os.path.join(output_dir, f\"checkpoint_{self.save_iteration}\")\\n+ if os.path.exists(output_dir):\\n+ raise ValueError(\\n+ f\"Checkpoint directory {output_dir} ({self.save_iteration}) already exists. 
Please manually override `self.save_iteration` with what iteration to start with.\"\\n+ )\\n os.makedirs(output_dir, exist_ok=True)\\n logger.info(f\"Saving current state to {output_dir}\")\\n \\n@@ -1660,6 +1715,7 @@ def save_state(self, output_dir: str):\\n )\\n for i, obj in enumerate(self._custom_objects):\\n save_custom_state(obj, output_dir, i)\\n+ self.project_configuration.iteration += 1\\n return save_location\\n \\n def load_state(self, input_dir: str):\\ndiff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\\nindex 75694ba72..7afebea48 100644\\n--- a/src/accelerate/tracking.py\\n+++ b/src/accelerate/tracking.py\\n@@ -147,7 +147,7 @@ class TensorBoardTracker(GeneralTracker):\\n name = \"tensorboard\"\\n requires_logging_directory = True\\n \\n- def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]], **kwargs):\\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = None, **kwargs):\\n self.run_name = run_name\\n self.logging_dir = os.path.join(logging_dir, run_name)\\n self.writer = tensorboard.SummaryWriter(self.logging_dir, **kwargs)\\n@@ -451,7 +451,6 @@ def __init__(\\n run_name: Optional[str] = None,\\n description: Optional[str] = None,\\n ):\\n-\\n experiment_name = os.getenv(\"MLFLOW_EXPERIMENT_NAME\", experiment_name)\\n run_id = os.getenv(\"MLFLOW_RUN_ID\", run_id)\\n tags = os.getenv(\"MLFLOW_TAGS\", tags)\\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\\nindex be78d0089..d3fb916e8 100644\\n--- a/src/accelerate/utils/__init__.py\\n+++ b/src/accelerate/utils/__init__.py\\n@@ -16,6 +16,7 @@\\n LoggerType,\\n MegatronLMPlugin,\\n PrecisionType,\\n+ ProjectConfiguration,\\n RNGType,\\n SageMakerDistributedType,\\n TensorInformation,\\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\\nindex eafad8a09..dc8883425 100644\\n--- a/src/accelerate/utils/dataclasses.py\\n+++ b/src/accelerate/utils/dataclasses.py\\n@@ -313,6 +313,39 @@ class TensorInformation:\\n dtype: torch.dtype\\n \\n \\n+@dataclass\\n+class ProjectConfiguration:\\n+ \"\"\"\\n+ Configuration for the Accelerator object based on inner-project needs.\\n+ \"\"\"\\n+\\n+ project_dir: str = field(default=None, metadata={\"help\": \"A path to a directory for storing data.\"})\\n+ logging_dir: str = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"A path to a directory for storing logs of locally-compatible loggers. 
If None, defaults to `project_dir`.\"\\n+ },\\n+ )\\n+ automatic_checkpoint_naming: bool = field(\\n+ default=False,\\n+ metadata={\"help\": \"Whether saved states should be automatically iteratively named.\"},\\n+ )\\n+\\n+ total_limit: int = field(\\n+ default=None,\\n+ metadata={\"help\": \"The maximum number of total saved states to keep.\"},\\n+ )\\n+\\n+ iteration: int = field(\\n+ default=0,\\n+ metadata={\"help\": \"The current save iteration.\"},\\n+ )\\n+\\n+ def __post_init__(self):\\n+ if self.logging_dir is None:\\n+ self.logging_dir = self.project_dir\\n+\\n+\\n @dataclass\\n class DeepSpeedPlugin:\\n \"\"\"\\ndiff --git a/tests/test_state_checkpointing.py b/tests/test_state_checkpointing.py\\nindex 87b2d3b06..dd2f6a98b 100644\\n--- a/tests/test_state_checkpointing.py\\n+++ b/tests/test_state_checkpointing.py\\n@@ -23,7 +23,7 @@\\n from torch.utils.data import DataLoader, TensorDataset\\n \\n from accelerate import Accelerator\\n-from accelerate.utils import set_seed\\n+from accelerate.utils import ProjectConfiguration, set_seed\\n \\n \\n logger = logging.getLogger(__name__)\\n@@ -75,7 +75,26 @@ def forward(self, x):\\n \\n \\n class CheckpointTest(unittest.TestCase):\\n- def test_can_resume_training(self):\\n+ def test_with_save_limit(self):\\n+ with tempfile.TemporaryDirectory() as tmpdir:\\n+ set_seed(42)\\n+ model = DummyModel()\\n+ optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)\\n+ train_dataloader, valid_dataloader = dummy_dataloaders()\\n+ project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)\\n+ # Train baseline\\n+ accelerator = Accelerator(project_config=project_config)\\n+ model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(\\n+ model, optimizer, train_dataloader, valid_dataloader\\n+ )\\n+ # Save initial\\n+ accelerator.save_state()\\n+\\n+ # Save second state\\n+ accelerator.save_state()\\n+ self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)\\n+\\n+ def test_can_resume_training_with_folder(self):\\n with tempfile.TemporaryDirectory() as tmpdir:\\n set_seed(42)\\n model = DummyModel()\\n@@ -126,6 +145,58 @@ def test_can_resume_training(self):\\n self.assertEqual(opt_state1, opt_state3)\\n self.assertEqual(ground_truth_rands, test_rands)\\n \\n+ def test_can_resume_training(self):\\n+ with tempfile.TemporaryDirectory() as tmpdir:\\n+ set_seed(42)\\n+ model = DummyModel()\\n+ optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)\\n+ train_dataloader, valid_dataloader = dummy_dataloaders()\\n+ project_config = ProjectConfiguration(automatic_checkpoint_naming=True)\\n+\\n+ # Train baseline\\n+ accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)\\n+ model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(\\n+ model, optimizer, train_dataloader, valid_dataloader\\n+ )\\n+ # Save initial\\n+ accelerator.save_state()\\n+ (a, b) = model.a.item(), model.b.item()\\n+ opt_state = optimizer.state_dict()\\n+ ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)\\n+ (a1, b1) = model.a.item(), model.b.item()\\n+ opt_state1 = optimizer.state_dict()\\n+\\n+ # Train partially\\n+ set_seed(42)\\n+ model = DummyModel()\\n+ optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)\\n+ train_dataloader, valid_dataloader = dummy_dataloaders()\\n+ project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)\\n+ accelerator = Accelerator(project_dir=tmpdir, 
project_config=project_config)\\n+ model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(\\n+ model, optimizer, train_dataloader, valid_dataloader\\n+ )\\n+ accelerator.load_state(os.path.join(tmpdir, \"checkpoints\", \"checkpoint_0\"))\\n+ (a2, b2) = model.a.item(), model.b.item()\\n+ opt_state2 = optimizer.state_dict()\\n+ self.assertEqual(a, a2)\\n+ self.assertEqual(b, b2)\\n+ self.assertEqual(opt_state, opt_state2)\\n+\\n+ test_rands = train(2, model, train_dataloader, optimizer, accelerator)\\n+ # Save everything\\n+ accelerator.save_state()\\n+\\n+ # Load everything back in and make sure all states work\\n+ accelerator.load_state(os.path.join(tmpdir, \"checkpoints\", \"checkpoint_1\"))\\n+ test_rands += train(1, model, train_dataloader, optimizer, accelerator)\\n+ (a3, b3) = model.a.item(), model.b.item()\\n+ opt_state3 = optimizer.state_dict()\\n+ self.assertEqual(a1, a3)\\n+ self.assertEqual(b1, b3)\\n+ self.assertEqual(opt_state1, opt_state3)\\n+ self.assertEqual(ground_truth_rands, test_rands)\\n+\\n def test_invalid_registration(self):\\n t = torch.tensor([1, 2, 3])\\n t1 = torch.tensor([2, 3, 4])\\n@@ -147,19 +218,18 @@ def test_with_scheduler(self):\\n optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)\\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)\\n train_dataloader, valid_dataloader = dummy_dataloaders()\\n+ project_config = ProjectConfiguration(automatic_checkpoint_naming=True)\\n # Train baseline\\n- accelerator = Accelerator()\\n- model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(\\n- model, optimizer, train_dataloader, valid_dataloader\\n+ accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)\\n+ model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(\\n+ model, optimizer, train_dataloader, valid_dataloader, scheduler\\n )\\n- accelerator.register_for_checkpointing(scheduler)\\n # Save initial\\n- initial = os.path.join(tmpdir, \"initial\")\\n- accelerator.save_state(initial)\\n+ accelerator.save_state()\\n scheduler_state = scheduler.state_dict()\\n train(3, model, train_dataloader, optimizer, accelerator, scheduler)\\n self.assertNotEqual(scheduler_state, scheduler.state_dict())\\n \\n # Load everything back in and make sure all states work\\n- accelerator.load_state(initial)\\n+ accelerator.load_state(os.path.join(tmpdir, \"checkpoints\", \"checkpoint_0\"))\\n self.assertEqual(scheduler_state, scheduler.state_dict())\\ndiff --git a/tests/test_tracking.py b/tests/test_tracking.py\\nindex d6ff4908a..1aa341f9b 100644\\n--- a/tests/test_tracking.py\\n+++ b/tests/test_tracking.py\\n@@ -70,9 +70,11 @@ def test_log(self):\\n log = list(filter(lambda x: x.is_file(), Path(f\"{dirpath}/{project_name}\").iterdir()))[0]\\n self.assertNotEqual(str(log), \"\")\\n \\n- def test_logging_dir(self):\\n+ def test_project_dir(self):\\n with self.assertRaisesRegex(ValueError, \"Logging with `tensorboard` requires a `logging_dir`\"):\\n _ = Accelerator(log_with=\"tensorboard\")\\n+ with tempfile.TemporaryDirectory() as dirpath:\\n+ _ = Accelerator(log_with=\"tensorboard\", project_dir=dirpath)\\n with tempfile.TemporaryDirectory() as dirpath:\\n _ = Accelerator(log_with=\"tensorboard\", logging_dir=dirpath)\\n \\n',\n", - " 'code_comments': [{'body': 'This is starting to make too many arguments.',\n", - " 'diff_hunk': '@@ -158,7 +159,16 @@ class Accelerator:\\n If `\"all\"` is selected, will pick up all available trackers in the environment 
and initialize them. Can\\n also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `\"all\"`.\\n logging_dir (`str`, `os.PathLike`, *optional*):\\n- A path to a directory for storing logs of locally-compatible loggers.\\n+ A path to a directory for storing logs of locally-compatible loggers. If not passed will save in\\n+ `project_dir` by default.\\n+ project_dir (`str`, `os.PathLike`, *optional*):\\n+ A path to a directory for storing data such as logs of locally-compatible loggers and potentially saved\\n+ checkpoints.\\n+ automatic_checkpoint_naming (`bool`, *optional*, defaults to `False`):\\n+ Whether saved states should be stored in `project_location` and be automatically iteratively named.\\n+ save_total_limit (`int`, *optional*):\\n+ The maximum number of checkpoints to keep if performing `automatic_checkpoint_naming`, will default to all\\n+ of them.',\n", - " 'from_author': False},\n", - " {'body': 'Fixed with a `SaveConfiguration` class',\n", - " 'diff_hunk': '@@ -158,7 +159,16 @@ class Accelerator:\\n If `\"all\"` is selected, will pick up all available trackers in the environment and initialize them. Can\\n also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `\"all\"`.\\n logging_dir (`str`, `os.PathLike`, *optional*):\\n- A path to a directory for storing logs of locally-compatible loggers.\\n+ A path to a directory for storing logs of locally-compatible loggers. If not passed will save in\\n+ `project_dir` by default.\\n+ project_dir (`str`, `os.PathLike`, *optional*):\\n+ A path to a directory for storing data such as logs of locally-compatible loggers and potentially saved\\n+ checkpoints.\\n+ automatic_checkpoint_naming (`bool`, *optional*, defaults to `False`):\\n+ Whether saved states should be stored in `project_location` and be automatically iteratively named.\\n+ save_total_limit (`int`, *optional*):\\n+ The maximum number of checkpoints to keep if performing `automatic_checkpoint_naming`, will default to all\\n+ of them.',\n", - " 'from_author': True},\n", - " {'body': \"Let's also deprecate logging_dir to include it in the SaveConfig? Wdyt?\",\n", - " 'diff_hunk': '@@ -205,13 +213,23 @@ def __init__(\\n megatron_lm_plugin: MegatronLMPlugin = None,\\n rng_types: Optional[List[Union[str, RNGType]]] = None,\\n log_with: Optional[List[Union[str, LoggerType, GeneralTracker]]] = None,\\n+ project_dir: Optional[Union[str, os.PathLike]] = None,\\n+ save_config: Optional[SaveConfiguration] = None,\\n logging_dir: Optional[Union[str, os.PathLike]] = None,\\n dispatch_batches: Optional[bool] = None,\\n even_batches: bool = True,\\n step_scheduler_with_optimizer: bool = True,\\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\\n dynamo_backend: Union[DynamoBackend, str] = None,\\n ):\\n+ if save_config is not None:\\n+ self.save_configuration = save_config\\n+ else:\\n+ self.save_configuration = SaveConfiguration()\\n+ if project_dir is not None and self.project_dir is None:\\n+ self.save_configuration.project_dir = project_dir\\n+ if self.project_dir is not None and logging_dir is None:',\n", - " 'from_author': False},\n", - " {'body': 'I was debating on that, if we did so I think it\\'d be better to name it `ProjectConfiguration` instead since it\\'s all relative to the total \"project\" at that point. But can make adjustments for it. 
The only \"con\" I see is given the conversation above for the user the code would change, but the end-result would not so I think it\\'d be fine',\n", - " 'diff_hunk': '@@ -205,13 +213,23 @@ def __init__(\\n megatron_lm_plugin: MegatronLMPlugin = None,\\n rng_types: Optional[List[Union[str, RNGType]]] = None,\\n log_with: Optional[List[Union[str, LoggerType, GeneralTracker]]] = None,\\n+ project_dir: Optional[Union[str, os.PathLike]] = None,\\n+ save_config: Optional[SaveConfiguration] = None,\\n logging_dir: Optional[Union[str, os.PathLike]] = None,\\n dispatch_batches: Optional[bool] = None,\\n even_batches: bool = True,\\n step_scheduler_with_optimizer: bool = True,\\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\\n dynamo_backend: Union[DynamoBackend, str] = None,\\n ):\\n+ if save_config is not None:\\n+ self.save_configuration = save_config\\n+ else:\\n+ self.save_configuration = SaveConfiguration()\\n+ if project_dir is not None and self.project_dir is None:\\n+ self.save_configuration.project_dir = project_dir\\n+ if self.project_dir is not None and logging_dir is None:',\n", - " 'from_author': True},\n", - " {'body': '```suggestion\\r\\nif there are other further configurations such as those which can be combined with the [`~utils.ProjectConfiguration`] dataclass.\\r\\n```',\n", - " 'diff_hunk': '@@ -83,6 +83,12 @@ for iteration in config[\"num_iterations\"]:\\n accelerator.end_training()\\n ```\\n \\n+If a tracker requires a directory to save data to such as `TensorBoard` then a `logging_dir` or `project_dir` can be passed in. `project_dir` is useful \\n+for if there are other further configurations such as those which can be combined with the [`~utils.ProjectConfiguration`] dataclass.',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'In my setup I have a VM for training and a LXC that hosts TensorBoard linked by a NFS share. I set `logging_dir` to the NFS share so the VM pushes its logs directly to TB as they are generated. Since this PR centralizes logging and output I would no longer be able to dump my logs directly to the TB server and instead have to do some complicated rsync watchdog service.\\r\\n\\r\\nCan we keep `logging_dir` as an option? Just set it to `project_dir` by default, no need to depreciate it.',\n", - " 'from_author': False},\n", - " {'body': \"@Cyberes there is a deprecation cycle for `logging_dir` of two full versions and when it comes to trackers `project_dir` acts the exact same as the `logging_dir` parameter, so for your particular setup if I understand correctly would just need to change `logging_dir` to `project_dir` and nothing else. \\r\\n\\r\\nIs this reasonable enough for you? Or is there something more specific/drastic I've misunderstood.\",\n", - " 'from_author': True},\n", - " {'body': \"Just to make sure I'm understanding the new functionality, the checkpoints are saved to `project_dir/checkpoints` and the logs are saved to `project_dir/logs/[name]`?\",\n", - " 'from_author': False},\n", - " {'body': 'Checkpoints are saved to `project_dir/checkpoints`, logs are saved to `project_dir/[name]`',\n", - " 'from_author': True},\n", - " {'body': \"Yeah, I'd like to have the option to put them in completely separate directories. 
For example, save the checkpoint states to `project_dir/checkpoints` and TB logs to `/mnt/tensorboard`.\\r\\n\\r\\nIf you're against the seperate directories thing I could always override `TensorBoardTracker`, I guess.\",\n", - " 'from_author': False},\n", - " {'body': 'That use case makes sense, will do as you suggested and override only if not passed. Thanks @Cyberes :) ',\n", - " 'from_author': True},\n", - " {'body': 'Sweeeeeeeeeet, thanks @muellerzr.', 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/916'},\n", - " 1160928325: {'diff': 'diff --git a/docs/source/usage_guides/checkpoint.mdx b/docs/source/usage_guides/checkpoint.mdx\\nindex 7d6bbbf99..f2684971b 100644\\n--- a/docs/source/usage_guides/checkpoint.mdx\\n+++ b/docs/source/usage_guides/checkpoint.mdx\\n@@ -31,13 +31,13 @@ import torch\\n accelerator = Accelerator()\\n \\n my_scheduler = torch.optim.lr_scheduler.StepLR(my_optimizer, step_size=1, gamma=0.99)\\n-my_model, my_optimizer, my_training_dataloader = accelerate.prepare(my_model, my_optimizer, my_training_dataloader)\\n+my_model, my_optimizer, my_training_dataloader = accelerator.prepare(my_model, my_optimizer, my_training_dataloader)\\n \\n # Register the LR scheduler\\n-accelerate.register_for_checkpointing(my_scheduler)\\n+accelerator.register_for_checkpointing(my_scheduler)\\n \\n # Save the starting state\\n-accelerate.save_state(\"my/save/path\")\\n+accelerator.save_state(\"my/save/path\")\\n \\n device = accelerator.device\\n my_model.to(device)\\n@@ -56,5 +56,5 @@ for epoch in range(num_epochs):\\n my_scheduler.step()\\n \\n # Restore previous state\\n-accelerate.load_state(\"my/save/path\")\\n+accelerator.load_state(\"my/save/path\")\\n ```\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_915). All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False},\n", - " {'body': 'Thanks for the fixes! 🤗', 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/915'},\n", - " 1156061410: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex 85fc1b4d2..1c3782067 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -939,9 +939,9 @@ def prepare_model(self, model: torch.nn.Module, device_placement=None):\\n backward_prefetch=fsdp_plugin.backward_prefetch,\\n mixed_precision=fsdp_plugin.mixed_precision_policy,\\n ignored_modules=fsdp_plugin.ignored_modules,\\n+ device_id=self.device,\\n+ limit_all_gathers=fsdp_plugin.limit_all_gathers,\\n )\\n- if not fsdp_plugin.cpu_offload.offload_params:\\n- model.to(self.device)\\n self._models[-1] = model\\n elif self.distributed_type == DistributedType.MULTI_CPU:\\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\\nindex 2e286b657..eafad8a09 100644\\n--- a/src/accelerate/utils/dataclasses.py\\n+++ b/src/accelerate/utils/dataclasses.py\\n@@ -554,6 +554,16 @@ class FullyShardedDataParallelPlugin:\\n },\\n )\\n \\n+ limit_all_gathers: bool = field(\\n+ default=False,\\n+ metadata={\\n+ \"help\": \"If False, then FSDP allows the CPU thread to schedule all-gathers \"\\n+ \"without any extra synchronization. 
If True, then FSDP explicitly synchronizes the CPU thread to prevent \"\\n+ \"too many in-flight all-gathers. This bool only affects the sharded strategies that schedule all-gathers. \"\\n+ \"Enabling this can help lower the number of CUDA malloc retries.\"\\n+ },\\n+ )\\n+\\n def __post_init__(self):\\n from torch.distributed.fsdp.fully_sharded_data_parallel import (\\n BackwardPrefetch,\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/911'},\n", - " 1155223453: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex 85fc1b4d2..0808ca462 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -349,7 +349,11 @@ def __init__(\\n self.scaler = None\\n self.native_amp = False\\n err = \"{mode} mixed precision requires {requirement}\"\\n- if self.state.mixed_precision == \"fp16\" and self.distributed_type != DistributedType.MEGATRON_LM:\\n+ if (\\n+ self.state.mixed_precision == \"fp16\"\\n+ and self.device.type != \"cpu\"\\n+ and self.distributed_type != DistributedType.MEGATRON_LM\\n+ ):\\n self.native_amp = True\\n if not torch.cuda.is_available() and not parse_flag_from_env(\"ACCELERATE_USE_MPS_DEVICE\"):\\n raise ValueError(err.format(mode=\"fp16\", requirement=\"a GPU\"))\\n@@ -1847,7 +1851,7 @@ def autocast(self):\\n if self.native_amp:\\n if self.mixed_precision == \"fp16\" and is_torch_version(\">=\", \"1.10\"):\\n autocast_context = torch.cuda.amp.autocast(dtype=torch.float16)\\n- elif self.mixed_precision == \"bf16\" and is_bf16_available():\\n+ elif self.mixed_precision == \"bf16\":\\n if self.distributed_type in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:\\n autocast_context = torch.autocast(dtype=torch.bfloat16, device_type=self.device.type)\\n else:\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '@muellerzr @sgugger @yao-matrix @jiqing-feng.',\n", - " 'from_author': True},\n", - " {'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/909'},\n", - " 1151607146: {'diff': 'diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex 12f67088a..97bd75833 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -1006,7 +1006,7 @@ def launch_command(args):\\n raise ValueError(\\n \"You can only use one of `--cpu`, `--multi_gpu`, `--mps`, `--tpu`, `--use_deepspeed`, `--use_fsdp` at a time.\"\\n )\\n- if args.multi_gpu and args.num_processes < 2:\\n+ if args.multi_gpu and (args.num_processes is not None) and (args.num_processes < 2):\\n raise ValueError(\"You need to use at least 2 processes to use `--multi_gpu`.\")\\n \\n defaults = None\\n',\n", - " 'code_comments': [{'body': 'Can all be grouped in the same if ;-)',\n", - " 'diff_hunk': '@@ -1006,8 +1006,9 @@ def launch_command(args):\\n raise ValueError(\\n \"You can only use one of `--cpu`, `--multi_gpu`, `--mps`, `--tpu`, `--use_deepspeed`, `--use_fsdp` at a time.\"\\n )\\n- if args.multi_gpu and args.num_processes < 2:\\n- raise ValueError(\"You need to use at least 2 processes to use `--multi_gpu`.\")\\n+ if args.num_processes is not None:\\n+ if args.multi_gpu and args.num_processes < 2:\\n+ raise 
ValueError(\"You need to use at least 2 processes to use `--multi_gpu`.\")',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/907'},\n", - " 1150778099: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex 1d6f7ee10..85fc1b4d2 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -365,7 +365,10 @@ def __init__(\\n and self.distributed_type != DistributedType.FSDP\\n and self.distributed_type != DistributedType.MEGATRON_LM\\n ):\\n- self.native_amp = is_bf16_available(True)\\n+ if self.device.type == \"cpu\":\\n+ self.native_amp = is_torch_version(\">=\", \"1.10\")\\n+ else:\\n+ self.native_amp = is_bf16_available(True)\\n if mixed_precision == \"bf16\" and not self.native_amp and not is_tpu_available():\\n raise ValueError(err.format(mode=\"bf16\", requirement=\"PyTorch >= 1.10 and a supported device.\"))\\n \\n@@ -948,8 +951,7 @@ def prepare_model(self, model: torch.nn.Module, device_placement=None):\\n if self.mixed_precision == \"fp16\" and is_torch_version(\">=\", \"1.10\"):\\n model.forward = torch.cuda.amp.autocast(dtype=torch.float16)(model.forward)\\n elif self.mixed_precision == \"bf16\" and self.distributed_type != DistributedType.TPU:\\n- device_type = \"cuda\" if torch.cuda.is_available() else \"cpu\"\\n- model.forward = torch.autocast(device_type=device_type, dtype=torch.bfloat16)(model.forward)\\n+ model.forward = torch.autocast(device_type=self.device.type, dtype=torch.bfloat16)(model.forward)\\n else:\\n model.forward = torch.cuda.amp.autocast()(model.forward)\\n model.forward = convert_outputs_to_fp32(model.forward)\\n@@ -1847,8 +1849,7 @@ def autocast(self):\\n autocast_context = torch.cuda.amp.autocast(dtype=torch.float16)\\n elif self.mixed_precision == \"bf16\" and is_bf16_available():\\n if self.distributed_type in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:\\n- device_type = \"cpu\" if not torch.cuda.is_available() else \"cuda\"\\n- autocast_context = torch.autocast(dtype=torch.bfloat16, device_type=device_type)\\n+ autocast_context = torch.autocast(dtype=torch.bfloat16, device_type=self.device.type)\\n else:\\n autocast_context = torch.cuda.amp.autocast()\\n \\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex 12f67088a..af3ba0691 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -1089,7 +1089,7 @@ def launch_command(args):\\n [\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1\\n )\\n threads_per_process = int(psutil.cpu_count(logical=False) / local_size)\\n- if args.num_cpu_threads_per_process > 1:\\n+ if threads_per_process > 1:\\n args.num_cpu_threads_per_process = threads_per_process\\n warned.append(\\n f\"\\\\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance when training on CPUs\"\\n',\n", - " 'code_comments': [{'body': 'This line (and the next occurrence) can be removed entirely and just pass in `self.device.type` to `torch.autocast`',\n", - " 'diff_hunk': '@@ -948,7 +951,7 @@ def prepare_model(self, model: torch.nn.Module, device_placement=None):\\n if self.mixed_precision == \"fp16\" and is_torch_version(\">=\", \"1.10\"):\\n model.forward = 
torch.cuda.amp.autocast(dtype=torch.float16)(model.forward)\\n elif self.mixed_precision == \"bf16\" and self.distributed_type != DistributedType.TPU:\\n- device_type = \"cuda\" if torch.cuda.is_available() else \"cpu\"\\n+ device_type = \"cuda\" if self.device.type != \"cpu\" and torch.cuda.is_available() else \"cpu\"',\n", - " 'from_author': False},\n", - " {'body': 'good point. done',\n", - " 'diff_hunk': '@@ -948,7 +951,7 @@ def prepare_model(self, model: torch.nn.Module, device_placement=None):\\n if self.mixed_precision == \"fp16\" and is_torch_version(\">=\", \"1.10\"):\\n model.forward = torch.cuda.amp.autocast(dtype=torch.float16)(model.forward)\\n elif self.mixed_precision == \"bf16\" and self.distributed_type != DistributedType.TPU:\\n- device_type = \"cuda\" if torch.cuda.is_available() else \"cpu\"\\n+ device_type = \"cuda\" if self.device.type != \"cpu\" and torch.cuda.is_available() else \"cpu\"',\n", - " 'from_author': True},\n", - " {'body': 'done',\n", - " 'diff_hunk': '@@ -948,7 +951,7 @@ def prepare_model(self, model: torch.nn.Module, device_placement=None):\\n if self.mixed_precision == \"fp16\" and is_torch_version(\">=\", \"1.10\"):\\n model.forward = torch.cuda.amp.autocast(dtype=torch.float16)(model.forward)\\n elif self.mixed_precision == \"bf16\" and self.distributed_type != DistributedType.TPU:\\n- device_type = \"cuda\" if torch.cuda.is_available() else \"cpu\"\\n+ device_type = \"cuda\" if self.device.type != \"cpu\" and torch.cuda.is_available() else \"cpu\"',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '@sgugger @yao-matrix @jiqing-feng please notice the issue',\n", - " 'from_author': True},\n", - " {'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/906'},\n", - " 1149883513: {'diff': 'diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex 081380331..12f67088a 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -1002,8 +1002,12 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\\n \\n def launch_command(args):\\n # Sanity checks\\n- if sum([args.multi_gpu, args.tpu, args.use_deepspeed, args.use_fsdp]) > 1:\\n- raise ValueError(\"You can only pick one between `--multi_gpu`, `--use_deepspeed`, `--tpu`, `--use_fsdp`.\")\\n+ if sum([args.multi_gpu, args.cpu, args.tpu, args.mps, args.use_deepspeed, args.use_fsdp]) > 1:\\n+ raise ValueError(\\n+ \"You can only use one of `--cpu`, `--multi_gpu`, `--mps`, `--tpu`, `--use_deepspeed`, `--use_fsdp` at a time.\"\\n+ )\\n+ if args.multi_gpu and args.num_processes < 2:\\n+ raise ValueError(\"You need to use at least 2 processes to use `--multi_gpu`.\")\\n \\n defaults = None\\n warned = []\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/905'},\n", - " 1149817474: {'diff': 'diff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\\nindex 490ea3f4f..2e286b657 100644\\n--- a/src/accelerate/utils/dataclasses.py\\n+++ b/src/accelerate/utils/dataclasses.py\\n@@ -65,7 +65,18 @@ class DistributedDataParallelKwargs(KwargsHandler):\\n \\n `static_graph` is only available in PyTorch 1.11.0 and later versions.\\n \\n- \"\"\"\\n+ \\n+\\n+ Example:\\n+\\n+ 
```python\\n+ from accelerate import Accelerator\\n+ from accelerate.utils import DistributedDataParallelKwargs\\n+\\n+ kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)\\n+ accelerator = Accelerator(kwargs_handlers=[kwargs])\\n+ ```\\n+ \"\"\"\\n \\n dim: int = 0\\n broadcast_buffers: bool = True\\n@@ -87,7 +98,18 @@ class GradScalerKwargs(KwargsHandler):\\n \\n `GradScaler` is only available in PyTorch 1.5.0 and later versions.\\n \\n- \"\"\"\\n+ \\n+\\n+ Example:\\n+\\n+ ```python\\n+ from accelerate import Accelerator\\n+ from accelerate.utils import GradScalerKwargs\\n+\\n+ kwargs = GradScalerKwargs(backoff_filter=0.25)\\n+ accelerator = Accelerator(kwargs_handlers=[kwargs])\\n+ ```\\n+ \"\"\"\\n \\n init_scale: float = 65536.0\\n growth_factor: float = 2.0\\n@@ -103,6 +125,15 @@ class InitProcessGroupKwargs(KwargsHandler):\\n to the documentation of this\\n [method](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) for more\\n information on each argument.\\n+\\n+ ```python\\n+ from datetime import timedelta\\n+ from accelerate import Accelerator\\n+ from accelerate.utils import InitProcessGroupKwargs\\n+\\n+ kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=800))\\n+ accelerator = Accelerator(kwargs_handlers=[kwargs])\\n+ ```\\n \"\"\"\\n \\n init_method: Optional[str] = None\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/904'},\n", - " 1143838386: {'diff': 'diff --git a/docs/source/package_reference/cli.mdx b/docs/source/package_reference/cli.mdx\\nindex 3d72aada6..402d57781 100644\\n--- a/docs/source/package_reference/cli.mdx\\n+++ b/docs/source/package_reference/cli.mdx\\n@@ -125,6 +125,8 @@ accelerate launch [arguments] {training_script} --{training_script-argument-1} -\\n * `-m`, `--module` (`bool`) -- Change each process to interpret the launch script as a Python module, executing with the same behavior as \\'python -m\\'.\\n * `--no_python` (`bool`) -- Skip prepending the training script with \\'python\\' - just execute it directly. Useful when the script is not a Python script.\\n * `--debug` (`bool`) -- Whether to print out the torch.distributed stack trace when something fails.\\n+* `-q`, `--quiet` (`bool`) -- Silence subprocess errors from the launch stack trace to only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations).\\n+\\n \\n The rest of these arguments are configured through `accelerate config` and are read in from the specified `--config_file` (or default configuration) for their \\n values. They can also be passed in manually.\\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex d778177c6..081380331 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -137,6 +137,12 @@ def launch_command_parser(subparsers=None):\\n parser.add_argument(\\n \"--config_file\", default=None, help=\"The config file to use for the default values in the launching script.\"\\n )\\n+ parser.add_argument(\\n+ \"--quiet\",\\n+ \"-q\",\\n+ action=\"store_true\",\\n+ help=\"Silence subprocess errors from the launch stack trace and only show the relevant tracebacks. 
(Only applicable to DeepSpeed and single-process configurations)\",\\n+ )\\n # Hardware selection arguments\\n hardware_args = parser.add_argument_group(\\n \"Hardware Selection Arguments\", \"Arguments for selecting the hardware to be used.\"\\n@@ -564,7 +570,10 @@ def simple_launcher(args):\\n process = subprocess.Popen(cmd, env=current_env)\\n process.wait()\\n if process.returncode != 0:\\n- raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\\n+ if not args.quiet:\\n+ raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\\n+ else:\\n+ sys.exit(1)\\n \\n \\n def multi_gpu_launcher(args):\\n@@ -770,7 +779,10 @@ def deepspeed_launcher(args):\\n process = subprocess.Popen(cmd, env=current_env)\\n process.wait()\\n if process.returncode != 0:\\n- raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\\n+ if not args.quiet:\\n+ raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\\n+ else:\\n+ sys.exit(1)\\n else:\\n if is_torch_version(\"<\", \"1.9.0\"):\\n raise NotImplementedError(\"Multi-node training requires pytorch>=1.9.0\")\\n',\n", - " 'code_comments': [{'body': '```suggestion\\r\\n help=\"Silence subprocess errors from the launch stack trace and only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations)\",\\r\\n```',\n", - " 'diff_hunk': '@@ -137,6 +137,12 @@ def launch_command_parser(subparsers=None):\\n parser.add_argument(\\n \"--config_file\", default=None, help=\"The config file to use for the default values in the launching script.\"\\n )\\n+ parser.add_argument(\\n+ \"--quiet\",\\n+ \"-q\",\\n+ action=\"store_true\",\\n+ help=\"Don\\'t print an error message if an error return code is returned from launch.\",',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'I followed the instructions in https://github.com/huggingface/accelerate/pull/895#issuecomment-1333213726\\r\\n```\\r\\nblack tests src examples benchmarks\\r\\nreformatted src/accelerate/commands/launch.py\\r\\n\\r\\nAll done! ✨ 🍰 ✨\\r\\n1 file reformatted, 104 files left unchanged.\\r\\nisort tests src examples benchmarks\\r\\npython utils/style_doc.py src/accelerate docs/source --max_len 119\\r\\nblack --check tests src examples benchmarks\\r\\nAll done! ✨ 🍰 ✨\\r\\n105 files would be left unchanged.\\r\\nisort --check-only tests src examples benchmarks\\r\\nflake8 tests src examples benchmarks\\r\\npython utils/style_doc.py src/accelerate docs/source --max_len 119 --check_only\\r\\n\\r\\n```',\n", - " 'from_author': True},\n", - " {'body': \"I'll make those doc changes. Let me know if I should change it to `--silent`/`-s`.\\r\\nTODO: update docs if it's changed to `--silent`\",\n", - " 'from_author': True},\n", - " {'body': 'No strong opinion on the name on my side.', 'from_author': False},\n", - " {'body': \"We'll keep it as `-q` then, thanks for your contribution @Cyberes! 
:) \",\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/902'},\n", - " 1140692330: {'diff': 'diff --git a/docs/source/package_reference/utilities.mdx b/docs/source/package_reference/utilities.mdx\\nindex 0c64953fc..3c1a1065e 100644\\n--- a/docs/source/package_reference/utilities.mdx\\n+++ b/docs/source/package_reference/utilities.mdx\\n@@ -93,3 +93,10 @@ These utilities relate to setting and synchronizing of all the random states.\\n [[autodoc]] utils.synchronize_rng_state\\n \\n [[autodoc]] utils.synchronize_rng_states\\n+\\n+\\n+## PyTorch XLA\\n+\\n+These include utilities that are useful while using PyTorch with XLA.\\n+\\n+[[autodoc]] utils.install_xla\\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\\nindex f46bce267..be78d0089 100644\\n--- a/src/accelerate/utils/__init__.py\\n+++ b/src/accelerate/utils/__init__.py\\n@@ -128,4 +128,5 @@\\n write_basic_config,\\n )\\n from .random import set_seed, synchronize_rng_state, synchronize_rng_states\\n+from .torch_xla import install_xla\\n from .tqdm import tqdm\\ndiff --git a/src/accelerate/utils/torch_xla.py b/src/accelerate/utils/torch_xla.py\\nnew file mode 100644\\nindex 000000000..59fe7970c\\n--- /dev/null\\n+++ b/src/accelerate/utils/torch_xla.py\\n@@ -0,0 +1,44 @@\\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+import subprocess\\n+import sys\\n+\\n+import pkg_resources\\n+\\n+\\n+def install_xla(upgrade: bool = False):\\n+ \"\"\"\\n+ Helper function to install appropriate xla wheels based on the `torch` version.\\n+\\n+ Args:\\n+ upgrade (`bool`, *optional*, defaults to `False`):\\n+ Whether to upgrade `torch` and install the latest `torch_xla` wheels.\\n+ \"\"\"\\n+ in_colab = False\\n+ if \"IPython\" in sys.modules:\\n+ in_colab = \"google.colab\" in str(sys.modules[\"IPython\"].get_ipython())\\n+\\n+ if in_colab:\\n+ if upgrade:\\n+ torch_install_cmd = [\"pip\", \"install\", \"-U\", \"torch\"]\\n+ subprocess.run(torch_install_cmd, check=True)\\n+ # get the current version of torch\\n+ torch_version = pkg_resources.get_distribution(\"torch\").version\\n+ torch_version_trunc = torch_version[: torch_version.rindex(\".\")]\\n+ xla_wheel = f\"https://storage.googleapis.com/tpu-pytorch/wheels/colab/torch_xla-{torch_version_trunc}-cp37-cp37m-linux_x86_64.whl\"\\n+ xla_install_cmd = [\"pip\", \"install\", xla_wheel]\\n+ subprocess.run(xla_install_cmd, check=True)\\n+ else:\\n+ raise RuntimeError(\"`install_xla` utility works only on google colab.\")\\n',\n", - " 'code_comments': [{'body': 'Is there a reason to limit this? Everything should work fine in any other env no? Or is it that the wheels can only work on Colab?',\n", - " 'diff_hunk': '@@ -0,0 +1,43 @@\\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+import sys\\n+import subprocess\\n+import pkg_resources\\n+\\n+\\n+def install_xla(upgrade: bool = False):\\n+ \"\"\"\\n+ Helper function to install appropriate xla wheels based on the `torch` version.\\n+\\n+ Args:\\n+ upgrade (`bool`, *optional*, defaults to `False`):\\n+ Whether to upgrade `torch` and install the latest `torch_xla` wheels.\\n+ \"\"\"\\n+ in_colab = False\\n+ if \"IPython\" in sys.modules:\\n+ in_colab = \"google.colab\" in str(sys.modules[\"IPython\"].get_ipython())\\n+\\n+ if in_colab:',\n", - " 'from_author': False},\n", - " {'body': 'The FR was for colab specifically because they have their own wheels for it https://github.com/huggingface/accelerate/issues/586 (otherwise things get more complicated such as trying to get the right python version, etc. Things that can be added later but for rn just an easy way in colab to skip a line :) )',\n", - " 'diff_hunk': '@@ -0,0 +1,43 @@\\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+import sys\\n+import subprocess\\n+import pkg_resources\\n+\\n+\\n+def install_xla(upgrade: bool = False):\\n+ \"\"\"\\n+ Helper function to install appropriate xla wheels based on the `torch` version.\\n+\\n+ Args:\\n+ upgrade (`bool`, *optional*, defaults to `False`):\\n+ Whether to upgrade `torch` and install the latest `torch_xla` wheels.\\n+ \"\"\"\\n+ in_colab = False\\n+ if \"IPython\" in sys.modules:\\n+ in_colab = \"google.colab\" in str(sys.modules[\"IPython\"].get_ipython())\\n+\\n+ if in_colab:',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/896'},\n", - " 1139261065: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex 4e23c18af..1d6f7ee10 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -237,7 +237,7 @@ def __init__(\\n else:\\n assert isinstance(\\n deepspeed_plugin, DeepSpeedPlugin\\n- ), \"`deepspeed_plugin` must be a DeepSpeedPlugin object.\"\\n+ ), \"`deepspeed_plugin` must be an `accelerate.utils.DeepSpeedPlugin` object.\"\\n os.environ[\"ACCELERATE_USE_DEEPSPEED\"] = \"true\" # use DeepSpeed if plugin is provided\\n if deepspeed_plugin:\\n if not 
is_deepspeed_available():\\n@@ -285,7 +285,9 @@ def __init__(\\n self.init_handler = None\\n if kwargs_handlers is not None:\\n for handler in kwargs_handlers:\\n- assert isinstance(handler, KwargsHandler), f\"Unsupported kwargs handler passed: {handler}.\"\\n+ assert isinstance(\\n+ handler, KwargsHandler\\n+ ), f\"Unsupported kwargs handler passed: {handler}, must be one that inherits `accelerate.utils.KwargsHandler`.\"\\n if isinstance(handler, DistributedDataParallelKwargs):\\n if self.ddp_handler is not None:\\n raise ValueError(\"You can only pass one `DistributedDataParallelKwargs` in `kwargs_handler`.\")\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/894'},\n", - " 1139142981: {'diff': 'diff --git a/src/accelerate/commands/menu/keymap.py b/src/accelerate/commands/menu/keymap.py\\nindex 7ce6c0637..3f08236b3 100644\\n--- a/src/accelerate/commands/menu/keymap.py\\n+++ b/src/accelerate/commands/menu/keymap.py\\n@@ -17,10 +17,9 @@\\n \"\"\"\\n \\n \\n+import os\\n import string\\n import sys\\n-import termios\\n-import tty\\n \\n \\n ARROW_KEY_FLAG = 1 << 8\\n@@ -36,24 +35,77 @@\\n \"mod_int\": 91,\\n \"undefined\": sys.maxsize,\\n \"interrupt\": 3,\\n+ \"insert\": 50,\\n+ \"delete\": 51,\\n+ \"pg_up\": 53,\\n+ \"pg_down\": 54,\\n }\\n \\n KEYMAP[\"arrow_begin\"] = KEYMAP[\"up\"]\\n KEYMAP[\"arrow_end\"] = KEYMAP[\"left\"]\\n \\n+if sys.platform == \"win32\":\\n+ WIN_CH_BUFFER = []\\n+ WIN_KEYMAP = {\\n+ b\"\\\\xe0H\": KEYMAP[\"up\"] - ARROW_KEY_FLAG,\\n+ b\"\\\\x00H\": KEYMAP[\"up\"] - ARROW_KEY_FLAG,\\n+ b\"\\\\xe0P\": KEYMAP[\"down\"] - ARROW_KEY_FLAG,\\n+ b\"\\\\x00P\": KEYMAP[\"down\"] - ARROW_KEY_FLAG,\\n+ b\"\\\\xe0M\": KEYMAP[\"right\"] - ARROW_KEY_FLAG,\\n+ b\"\\\\x00M\": KEYMAP[\"right\"] - ARROW_KEY_FLAG,\\n+ b\"\\\\xe0K\": KEYMAP[\"left\"] - ARROW_KEY_FLAG,\\n+ b\"\\\\x00K\": KEYMAP[\"left\"] - ARROW_KEY_FLAG,\\n+ }\\n+\\n for i in range(10):\\n KEYMAP[str(i)] = ord(str(i))\\n \\n \\n def get_raw_chars():\\n \"Gets raw characters from inputs\"\\n- fd = sys.stdin.fileno()\\n- old_settings = termios.tcgetattr(fd)\\n- try:\\n- tty.setraw(fd)\\n- ch = sys.stdin.read(1)\\n- finally:\\n- termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\\n+ if os.name == \"nt\":\\n+ import msvcrt\\n+\\n+ encoding = \"mbcs\"\\n+ # Flush the keyboard buffer\\n+ while msvcrt.kbhit():\\n+ msvcrt.getwch()\\n+ if len(WIN_CH_BUFFER) == 0:\\n+ # Read the keystroke\\n+ ch = msvcrt.getwch()\\n+ # If it is a prefix char, get second part\\n+ if ch.encode(encoding) in (b\"\\\\x00\", b\"\\\\xe0\"):\\n+ ch2 = ch + msvcrt.getwch()\\n+ # Translate actual Win chars to bullet char types\\n+ try:\\n+ chx = chr(WIN_KEYMAP[ch2.encode(encoding)])\\n+ WIN_CH_BUFFER.append(chr(KEYMAP[\"mod_int\"]))\\n+ WIN_CH_BUFFER.append(chx)\\n+ if ord(chx) in (\\n+ KEYMAP[\"insert\"] - 1 << 9,\\n+ KEYMAP[\"delete\"] - 1 << 9,\\n+ KEYMAP[\"pg_up\"] - 1 << 9,\\n+ KEYMAP[\"pg_down\"] - 1 << 9,\\n+ ):\\n+ WIN_CH_BUFFER.append(chr(126))\\n+ ch = chr(KEYMAP[\"esc\"])\\n+ except KeyError:\\n+ ch = ch2[1]\\n+ else:\\n+ pass\\n+ else:\\n+ ch = WIN_CH_BUFFER.pop(0)\\n+ elif os.name == \"posix\":\\n+ import termios\\n+ import tty\\n+\\n+ fd = sys.stdin.fileno()\\n+ old_settings = termios.tcgetattr(fd)\\n+ try:\\n+ tty.setraw(fd)\\n+ ch = sys.stdin.read(1)\\n+ finally:\\n+ termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\\n return ch\\n 
\\n \\ndiff --git a/src/accelerate/commands/menu/selection_menu.py b/src/accelerate/commands/menu/selection_menu.py\\nindex 751f6818a..d5a1c8e35 100644\\n--- a/src/accelerate/commands/menu/selection_menu.py\\n+++ b/src/accelerate/commands/menu/selection_menu.py\\n@@ -15,6 +15,8 @@\\n \"\"\"\\n Main driver for the selection menu, based on https://github.com/bchao1/bullet\\n \"\"\"\\n+import sys\\n+\\n from . import cursor, input\\n from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor\\n from .keymap import KEYMAP\\n@@ -30,12 +32,22 @@ def __init__(self, prompt: str = None, choices: list = []):\\n self.position = 0\\n self.choices = choices\\n self.prompt = prompt\\n+ if sys.platform == \"win32\":\\n+ self.arrow_char = \"*\"\\n+ else:\\n+ self.arrow_char = \"➔ \"\\n+\\n+ def write_choice(self, index, end: str = \"\"):\\n+ if sys.platform != \"win32\":\\n+ writeColor(self.choices[index], 32, end)\\n+ else:\\n+ forceWrite(self.choices[index], end)\\n \\n def print_choice(self, index: int):\\n \"Prints the choice at the given index\"\\n if index == self.position:\\n- forceWrite(\" ➔ \")\\n- writeColor(self.choices[index], 32)\\n+ forceWrite(f\" {self.arrow_char} \")\\n+ self.write_choice(index)\\n else:\\n forceWrite(f\" {self.choices[index]}\")\\n reset_cursor()\\n@@ -109,6 +121,5 @@ def run(self, default_choice: int = 0):\\n for _ in range(len(self.choices) + 1):\\n move_cursor(1, \"UP\")\\n clear_line()\\n- forceWrite(\" ➔ \")\\n- writeColor(self.choices[choice], 32, \"\\\\n\")\\n+ self.write_choice(choice, \"\\\\n\")\\n return choice\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/893'},\n", - " 1139132684: {'diff': 'diff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\\nindex e471731a4..ff3e95ead 100644\\n--- a/src/accelerate/utils/modeling.py\\n+++ b/src/accelerate/utils/modeling.py\\n@@ -21,11 +21,10 @@\\n from collections import defaultdict\\n from typing import Dict, List, Optional, Tuple, Union\\n \\n-import numpy as np\\n import torch\\n import torch.nn as nn\\n \\n-from .offload import offload_weight, save_offload_index\\n+from .offload import load_offloaded_weight, offload_weight, save_offload_index\\n \\n \\n WEIGHTS_INDEX_NAME = \"pytorch_model.bin.index.json\"\\n@@ -324,9 +323,8 @@ def load_offloaded_weights(model, index, offload_folder):\\n \\n for param_name, metadata in index.items():\\n tensor_file = os.path.join(offload_folder, f\"{param_name}.dat\")\\n- shape = tuple(metadata[\"shape\"])\\n- weight = np.memmap(tensor_file, dtype=metadata[\"dtype\"], mode=\"r\", shape=shape)\\n- set_module_tensor_to_device(model, param_name, \"cpu\", value=torch.tensor(weight))\\n+ weight = load_offloaded_weight(tensor_file, metadata)\\n+ set_module_tensor_to_device(model, param_name, \"cpu\", value=weight)\\n \\n \\n def get_balanced_memory(\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/892'},\n", - " 1138809005: {'diff': 'diff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py\\nindex 86c9766a1..eca75c1ed 100644\\n--- a/tests/deepspeed/test_deepspeed.py\\n+++ 
b/tests/deepspeed/test_deepspeed.py\\n@@ -119,7 +119,7 @@ def setUp(self):\\n self.ds_config_dict = dict(zero2=config_zero2, zero3=config_zero3)\\n \\n self.dist_env = dict(\\n- USE_DEEPSPEED=\"true\",\\n+ ACCELERATE_USE_DEEPSPEED=\"true\",\\n MASTER_ADDR=\"localhost\",\\n MASTER_PORT=\"10999\",\\n RANK=\"0\",\\ndiff --git a/tests/fsdp/test_fsdp.py b/tests/fsdp/test_fsdp.py\\nindex e90106c17..19917c671 100644\\n--- a/tests/fsdp/test_fsdp.py\\n+++ b/tests/fsdp/test_fsdp.py\\n@@ -58,7 +58,7 @@ def setUp(self):\\n super().setUp()\\n \\n self.dist_env = dict(\\n- USE_FSDP=\"true\",\\n+ ACCELERATE_USE_FSDP=\"true\",\\n MASTER_ADDR=\"localhost\",\\n MASTER_PORT=\"10999\",\\n RANK=\"0\",\\n@@ -147,7 +147,7 @@ def test_mixed_precision(self):\\n \\n for mp_dtype in dtypes:\\n env = self.dist_env.copy()\\n- env[\"MIXED_PRECISION\"] = mp_dtype\\n+ env[\"ACCELERATE_MIXED_PRECISION\"] = mp_dtype\\n with mockenv_context(**env):\\n accelerator = Accelerator()\\n if mp_dtype == \"fp16\":\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/891'},\n", - " 1137735853: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex 0d0fe0aa6..fabf4991e 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -119,9 +119,9 @@ class Accelerator:\\n in your script multiplied by the number of processes.\\n mixed_precision (`str`, *optional*):\\n Whether or not to use mixed precision training (fp16 or bfloat16). Choose from \\'no\\',\\'fp16\\',\\'bf16\\'. Will\\n- default to the value in the environment variable `MIXED_PRECISION`, which will use the default value in the\\n- accelerate config of the current system or the flag passed with the `accelerate.launch` command. \\'fp16\\'\\n- requires pytorch 1.6 or higher. \\'bf16\\' requires pytorch 1.10 or higher.\\n+ default to the value in the environment variable `ACCELERATE_MIXED_PRECISION`, which will use the default\\n+ value in the accelerate config of the current system or the flag passed with the `accelerate.launch`\\n+ command. \\'fp16\\' requires pytorch 1.6 or higher. \\'bf16\\' requires pytorch 1.10 or higher.\\n gradient_accumulation_steps (`int`, *optional*, default to 1):\\n The number of steps that should pass before gradients are accumulated. A number > 1 should be combined with\\n `Accelerator.accumulate`.\\n@@ -231,39 +231,49 @@ def __init__(\\n dynamo_backend = DynamoBackend(dynamo_backend.upper())\\n \\n if deepspeed_plugin is None: # init from env variables\\n- deepspeed_plugin = DeepSpeedPlugin() if os.environ.get(\"USE_DEEPSPEED\", \"false\") == \"true\" else None\\n+ deepspeed_plugin = (\\n+ DeepSpeedPlugin() if os.environ.get(\"ACCELERATE_USE_DEEPSPEED\", \"false\") == \"true\" else None\\n+ )\\n else:\\n assert isinstance(\\n deepspeed_plugin, DeepSpeedPlugin\\n ), \"`deepspeed_plugin` must be a DeepSpeedPlugin object.\"\\n- os.environ[\"USE_DEEPSPEED\"] = \"true\" # use DeepSpeed if plugin is provided\\n+ os.environ[\"ACCELERATE_USE_DEEPSPEED\"] = \"true\" # use DeepSpeed if plugin is provided\\n if deepspeed_plugin:\\n if not is_deepspeed_available():\\n raise ImportError(\"DeepSpeed is not installed => run `pip install deepspeed` or build it from source.\")\\n if compare_versions(\"deepspeed\", \"<\", \"0.6.5\"):\\n raise ImportError(\"DeepSpeed version must be >= 0.6.5. 
Please update DeepSpeed.\")\\n \\n- mixed_precision = os.environ.get(\"MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision\\n+ mixed_precision = (\\n+ os.environ.get(\"ACCELERATE_MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision\\n+ )\\n deepspeed_plugin.set_mixed_precision(mixed_precision)\\n deepspeed_plugin.set_deepspeed_weakref()\\n \\n- if os.environ.get(\"USE_FSDP\", \"false\") == \"true\" or isinstance(fsdp_plugin, FullyShardedDataParallelPlugin):\\n+ if os.environ.get(\"ACCELERATE_USE_FSDP\", \"false\") == \"true\" or isinstance(\\n+ fsdp_plugin, FullyShardedDataParallelPlugin\\n+ ):\\n if is_torch_version(\"<\", \"1.12.0\"):\\n raise ValueError(\"FSDP requires PyTorch >= 1.12.0\")\\n \\n if fsdp_plugin is None: # init from env variables\\n- fsdp_plugin = FullyShardedDataParallelPlugin() if os.environ.get(\"USE_FSDP\", \"false\") == \"true\" else None\\n+ fsdp_plugin = (\\n+ FullyShardedDataParallelPlugin() if os.environ.get(\"ACCELERATE_USE_FSDP\", \"false\") == \"true\" else None\\n+ )\\n else:\\n if not isinstance(fsdp_plugin, FullyShardedDataParallelPlugin):\\n raise TypeError(\"`fsdp_plugin` must be a FullyShardedDataParallelPlugin object.\")\\n- os.environ[\"USE_FSDP\"] = \"true\" # use FSDP if plugin is provided\\n+ os.environ[\"ACCELERATE_USE_FSDP\"] = \"true\" # use FSDP if plugin is provided\\n \\n if megatron_lm_plugin is None: # init from env variables\\n- megatron_lm_plugin = MegatronLMPlugin() if os.environ.get(\"USE_MEGATRON_LM\", \"false\") == \"true\" else None\\n+ megatron_lm_plugin = (\\n+ MegatronLMPlugin() if os.environ.get(\"ACCELERATE_USE_MEGATRON_LM\", \"false\") == \"true\" else None\\n+ )\\n else:\\n if not isinstance(megatron_lm_plugin, MegatronLMPlugin):\\n raise TypeError(\"`megatron_lm_plugin` must be a MegatronLMPlugin object.\")\\n- os.environ[\"USE_MEGATRON_LM\"] = \"true\" # use MegatronLM if plugin is provided\\n+ os.environ[\"ACCELERATE_USE_MEGATRON_LM\"] = \"true\" # use MegatronLM if plugin is provided\\n \\n if megatron_lm_plugin:\\n if not is_megatron_lm_available():\\n@@ -339,7 +349,7 @@ def __init__(\\n err = \"{mode} mixed precision requires {requirement}\"\\n if self.state.mixed_precision == \"fp16\" and self.distributed_type != DistributedType.MEGATRON_LM:\\n self.native_amp = True\\n- if not torch.cuda.is_available() and not parse_flag_from_env(\"USE_MPS_DEVICE\"):\\n+ if not torch.cuda.is_available() and not parse_flag_from_env(\"ACCELERATE_USE_MPS_DEVICE\"):\\n raise ValueError(err.format(mode=\"fp16\", requirement=\"a GPU\"))\\n kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}\\n if self.distributed_type == DistributedType.FSDP:\\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex 9e37bf0db..d778177c6 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -518,14 +518,14 @@ def simple_launcher(args):\\n cmd.extend(args.training_script_args)\\n \\n current_env = os.environ.copy()\\n- current_env[\"USE_CPU\"] = str(args.cpu or args.use_cpu)\\n+ current_env[\"ACCELERATE_USE_CPU\"] = str(args.cpu or args.use_cpu)\\n if args.use_mps_device:\\n warnings.warn(\\n \\'`use_mps_device` flag is deprecated and will be removed in version 0.15.0 of 🤗 Accelerate. 
Use \"--mps\" instead.\\',\\n FutureWarning,\\n )\\n args.mps = True\\n- current_env[\"USE_MPS_DEVICE\"] = str(args.mps)\\n+ current_env[\"ACCELERATE_USE_MPS_DEVICE\"] = str(args.mps)\\n if args.mps:\\n current_env[\"PYTORCH_ENABLE_MPS_FALLBACK\"] = \"1\"\\n elif args.gpu_ids != \"all\" and args.gpu_ids is not None:\\n@@ -551,13 +551,13 @@ def simple_launcher(args):\\n )\\n mixed_precision = \"fp16\"\\n \\n- current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\\n+ current_env[\"ACCELERATE_MIXED_PRECISION\"] = str(mixed_precision)\\n \\n try:\\n dynamo_backend = DynamoBackend(args.dynamo_backend.upper())\\n except ValueError:\\n raise ValueError(f\"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DYNAMO_BACKENDS}.\")\\n- current_env[\"DYNAMO_BACKEND\"] = dynamo_backend.value\\n+ current_env[\"ACCELERATE_DYNAMO_BACKEND\"] = dynamo_backend.value\\n \\n current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\\n \\n@@ -612,16 +612,16 @@ def multi_gpu_launcher(args):\\n )\\n mixed_precision = \"fp16\"\\n \\n- current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\\n+ current_env[\"ACCELERATE_MIXED_PRECISION\"] = str(mixed_precision)\\n \\n try:\\n dynamo_backend = DynamoBackend(args.dynamo_backend.upper())\\n except ValueError:\\n raise ValueError(f\"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DYNAMO_BACKENDS}.\")\\n- current_env[\"DYNAMO_BACKEND\"] = dynamo_backend.value\\n+ current_env[\"ACCELERATE_DYNAMO_BACKEND\"] = dynamo_backend.value\\n \\n if args.use_fsdp:\\n- current_env[\"USE_FSDP\"] = \"true\"\\n+ current_env[\"ACCELERATE_USE_FSDP\"] = \"true\"\\n current_env[\"FSDP_SHARDING_STRATEGY\"] = str(args.fsdp_sharding_strategy)\\n current_env[\"FSDP_OFFLOAD_PARAMS\"] = str(args.fsdp_offload_params).lower()\\n current_env[\"FSDP_MIN_NUM_PARAMS\"] = str(args.fsdp_min_num_params)\\n@@ -636,7 +636,7 @@ def multi_gpu_launcher(args):\\n \\n if args.use_megatron_lm:\\n prefix = \"MEGATRON_LM_\"\\n- current_env[\"USE_MEGATRON_LM\"] = \"true\"\\n+ current_env[\"ACCELERATE_USE_MEGATRON_LM\"] = \"true\"\\n current_env[prefix + \"TP_DEGREE\"] = str(args.megatron_lm_tp_degree)\\n current_env[prefix + \"PP_DEGREE\"] = str(args.megatron_lm_pp_degree)\\n current_env[prefix + \"GRADIENT_CLIPPING\"] = str(args.megatron_lm_gradient_clipping)\\n@@ -748,8 +748,8 @@ def deepspeed_launcher(args):\\n mixed_precision = \"fp16\"\\n \\n current_env[\"PYTHONPATH\"] = env_var_path_add(\"PYTHONPATH\", os.path.abspath(\".\"))\\n- current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\\n- current_env[\"USE_DEEPSPEED\"] = \"true\"\\n+ current_env[\"ACCELERATE_MIXED_PRECISION\"] = str(mixed_precision)\\n+ current_env[\"ACCELERATE_USE_DEEPSPEED\"] = \"true\"\\n current_env[\"DEEPSPEED_ZERO_STAGE\"] = str(args.zero_stage)\\n current_env[\"GRADIENT_ACCUMULATION_STEPS\"] = str(args.gradient_accumulation_steps)\\n current_env[\"GRADIENT_CLIPPING\"] = str(args.gradient_clipping).lower()\\n@@ -924,10 +924,10 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\\n \\n # Environment variables to be set for use during training job\\n environment = {\\n- \"USE_SAGEMAKER\": \"true\",\\n- \"MIXED_PRECISION\": str(mixed_precision),\\n- \"DYNAMO_BACKEND\": dynamo_backend.value,\\n- \"SAGEMAKER_DISTRIBUTED_TYPE\": sagemaker_config.distributed_type.value,\\n+ \"ACCELERATE_USE_SAGEMAKER\": \"true\",\\n+ \"ACCELERATE_MIXED_PRECISION\": str(mixed_precision),\\n+ \"ACCELERATE_DYNAMO_BACKEND\": dynamo_backend.value,\\n+ 
\"ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE\": sagemaker_config.distributed_type.value,\\n }\\n # configure distribution set up\\n distribution = None\\ndiff --git a/src/accelerate/launchers.py b/src/accelerate/launchers.py\\nindex 4de8b5858..5768ec06b 100644\\n--- a/src/accelerate/launchers.py\\n+++ b/src/accelerate/launchers.py\\n@@ -163,9 +163,9 @@ def debug_launcher(function, args=(), num_processes=2):\\n world_size=num_processes,\\n master_addr=\"127.0.01\",\\n master_port=\"29500\",\\n- mixed_precision=\"no\",\\n+ accelerate_mixed_precision=\"no\",\\n accelerate_debug_rdv_file=tmp_file.name,\\n- use_cpu=\"yes\",\\n+ accelerate_use_cpu=\"yes\",\\n ):\\n launcher = PrepareForLaunch(function, debug=True)\\n start_processes(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\\nindex 18df61bbc..a8a5e7960 100644\\n--- a/src/accelerate/state.py\\n+++ b/src/accelerate/state.py\\n@@ -66,7 +66,7 @@ def __init__(\\n **kwargs,\\n ):\\n self.__dict__ = self._shared_state\\n- if parse_flag_from_env(\"USE_CPU\"):\\n+ if parse_flag_from_env(\"ACCELERATE_USE_CPU\"):\\n cpu = True\\n self._check_initialized(mixed_precision, cpu)\\n self.fork_launched = parse_flag_from_env(\"FORK_LAUNCHED\", 0)\\n@@ -74,10 +74,12 @@ def __init__(\\n self.backend = None\\n self.deepspeed_plugin = None\\n mixed_precision = (\\n- parse_choice_from_env(\"MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision.lower()\\n+ parse_choice_from_env(\"ACCELERATE_MIXED_PRECISION\", \"no\")\\n+ if mixed_precision is None\\n+ else mixed_precision.lower()\\n )\\n dynamo_backend = (\\n- parse_choice_from_env(\"DYNAMO_BACKEND\", \"no\") if dynamo_backend is None else dynamo_backend\\n+ parse_choice_from_env(\"ACCELERATE_DYNAMO_BACKEND\", \"no\") if dynamo_backend is None else dynamo_backend\\n )\\n self.dynamo_backend = DynamoBackend(dynamo_backend.upper())\\n if not _from_accelerator:\\n@@ -86,11 +88,11 @@ def __init__(\\n \"before using any functionality from the `accelerate` library.\"\\n )\\n if (\\n- os.environ.get(\"USE_SAGEMAKER\", \"false\") == \"true\"\\n- and os.environ.get(\"SAGEMAKER_DISTRIBUTED_TYPE\") != SageMakerDistributedType.NO\\n+ os.environ.get(\"ACCELERATE_USE_SAGEMAKER\", \"false\") == \"true\"\\n+ and os.environ.get(\"ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE\") != SageMakerDistributedType.NO\\n and not cpu\\n ):\\n- if os.environ.get(\"SAGEMAKER_DISTRIBUTED_TYPE\") == SageMakerDistributedType.DATA_PARALLEL:\\n+ if os.environ.get(\"ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE\") == SageMakerDistributedType.DATA_PARALLEL:\\n self.distributed_type = DistributedType.MULTI_GPU\\n import smdistributed.dataparallel.torch.torch_smddp # noqa\\n \\n@@ -110,7 +112,7 @@ def __init__(\\n self.local_process_index = xm.get_local_ordinal()\\n self.device = xm.xla_device()\\n if mixed_precision == \"bf16\":\\n- if os.environ.get(\"DOWNCAST_BF16\"):\\n+ if os.environ.get(\"ACCELERATE_DOWNCAST_BF16\"):\\n os.environ[\"XLA_USE_BF16\"] = str(0)\\n os.environ[\"XLA_DOWNCAST_BF16\"] = str(1)\\n self.downcast_bfloat = True\\n@@ -119,7 +121,7 @@ def __init__(\\n os.environ[\"XLA_DOWNCAST_BF16\"] = str(0)\\n self.downcast_bfloat = False\\n self.mixed_precision = mixed_precision\\n- elif os.environ.get(\"USE_DEEPSPEED\", \"false\") == \"true\" and not cpu:\\n+ elif os.environ.get(\"ACCELERATE_USE_DEEPSPEED\", \"false\") == \"true\" and not cpu:\\n assert (\\n is_deepspeed_available()\\n ), \"DeepSpeed is not available => install it using `pip3 install 
deepspeed` or build it from source\"\\n@@ -153,12 +155,12 @@ def __init__(\\n self.device = torch.device(\"cuda\", self.local_process_index)\\n torch.cuda.set_device(self.device)\\n self.mixed_precision = mixed_precision\\n- if os.environ.get(\"USE_FSDP\", \"false\") == \"true\":\\n+ if os.environ.get(\"ACCELERATE_USE_FSDP\", \"false\") == \"true\":\\n self.distributed_type = DistributedType.FSDP\\n if self.mixed_precision != \"no\":\\n fsdp_plugin.set_mixed_precision(self.mixed_precision)\\n self.fsdp_plugin = fsdp_plugin\\n- if os.environ.get(\"USE_MEGATRON_LM\", \"false\") == \"true\":\\n+ if os.environ.get(\"ACCELERATE_USE_MEGATRON_LM\", \"false\") == \"true\":\\n self.distributed_type = DistributedType.MEGATRON_LM\\n megatron_lm_plugin.set_mixed_precision(self.mixed_precision)\\n self.megatron_lm_plugin = megatron_lm_plugin\\n@@ -207,7 +209,7 @@ def __init__(\\n self.distributed_type = DistributedType.NO\\n self.num_processes = 1\\n self.process_index = self.local_process_index = 0\\n- if parse_flag_from_env(\"USE_MPS_DEVICE\") and not cpu:\\n+ if parse_flag_from_env(\"ACCELERATE_USE_MPS_DEVICE\") and not cpu:\\n if not torch.backends.mps.is_available():\\n if not torch.backends.mps.is_built():\\n raise AssertionError(\\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\\nindex bfedee162..4ac250e6e 100644\\n--- a/src/accelerate/utils/imports.py\\n+++ b/src/accelerate/utils/imports.py\\n@@ -15,6 +15,7 @@\\n import importlib\\n import os\\n import sys\\n+import warnings\\n from distutils.util import strtobool\\n from functools import lru_cache\\n \\n@@ -93,7 +94,7 @@ def is_bf16_available(ignore_tpu=False):\\n \\n \\n def is_megatron_lm_available():\\n- if strtobool(os.environ.get(\"USE_MEGATRON_LM\", \"False\")) == 1:\\n+ if strtobool(os.environ.get(\"ACCELERATE_USE_MEGATRON_LM\", \"False\")) == 1:\\n package_exists = importlib.util.find_spec(\"megatron\") is not None\\n if package_exists:\\n megatron_version = parse(importlib_metadata.version(\"megatron-lm\"))\\n@@ -130,7 +131,15 @@ def is_boto3_available():\\n \\n \\n def is_rich_available():\\n- return (importlib.util.find_spec(\"rich\") is not None) and (not parse_flag_from_env(\"DISABLE_RICH\"))\\n+ if importlib.util.find_spec(\"rich\") is not None:\\n+ if parse_flag_from_env(\"DISABLE_RICH\"):\\n+ warnings.warn(\\n+ \"The `DISABLE_RICH` flag is deprecated and will be removed in version 0.17.0 of 🤗 Accelerate. Use `ACCELERATE_DISABLE_RICH` instead.\",\\n+ FutureWarning,\\n+ )\\n+ return not parse_flag_from_env(\"DISABLE_RICH\")\\n+ return not parse_flag_from_env(\"ACCELERATE_DISABLE_RICH\")\\n+ return False\\n \\n \\n def is_sagemaker_available():\\n',\n", - " 'code_comments': [{'body': 'This feels over-engineered, and outside the scope of this PR.',\n", - " 'diff_hunk': '@@ -0,0 +1,68 @@\\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+import argparse\\n+import warnings\\n+\\n+\\n+def deprecate_value(old=None, new=None, version=None, warn=True, deprecate_str=None):',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/890'},\n", - " 1136396651: {'diff': 'diff --git a/examples/by_feature/deepspeed_with_config_support.py b/examples/by_feature/deepspeed_with_config_support.py\\nindex 0fdff58ae..6cfee9dda 100755\\n--- a/examples/by_feature/deepspeed_with_config_support.py\\n+++ b/examples/by_feature/deepspeed_with_config_support.py\\n@@ -642,7 +642,7 @@ def group_texts(examples):\\n total_loss += loss.detach().float()\\n loss = loss / args.gradient_accumulation_steps\\n accelerator.backward(loss)\\n- if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:\\n+ if (step + 1) % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:\\n optimizer.step()\\n lr_scheduler.step()\\n optimizer.zero_grad()\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'Thanks! 🤗', 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/887'},\n", - " 1134441117: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex 0d0fe0aa6..0541ce506 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -795,7 +795,7 @@ def prepare(self, *args, device_placement=None):\\n - `torch.utils.data.DataLoader`: PyTorch Dataloader\\n - `torch.nn.Module`: PyTorch Module\\n - `torch.optim.Optimizer`: PyTorch Optimizer\\n- - `torch.optim.lr_scheduler._LRScheduler`: PyTorch LR Scheduler\\n+ - `torch.optim.lr_scheduler.LRScheduler`: PyTorch LR Scheduler\\n \\n device_placement (`List[bool]`, *optional*):\\n Used to customize whether automatic device placement should be performed for each object passed. 
Needs\\n@@ -998,7 +998,7 @@ def _prepare_deepspeed(self, *args):\\n model = obj\\n elif isinstance(obj, (torch.optim.Optimizer, DummyOptim)):\\n optimizer = obj\\n- elif (isinstance(obj, (torch.optim.lr_scheduler._LRScheduler, DummyScheduler))) or (\\n+ elif (isinstance(obj, (LRScheduler, DummyScheduler))) or (\\n type(obj).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES\\n ):\\n scheduler = obj\\n@@ -1097,7 +1097,7 @@ def _prepare_deepspeed(self, *args):\\n result[i] = engine\\n elif isinstance(result[i], (torch.optim.Optimizer, DummyOptim)):\\n result[i] = optimizer\\n- elif (isinstance(result[i], (torch.optim.lr_scheduler._LRScheduler, DummyScheduler))) or (\\n+ elif (isinstance(result[i], (LRScheduler, DummyScheduler))) or (\\n type(result[i]).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES\\n ):\\n result[i] = scheduler\\n@@ -1150,7 +1150,7 @@ def _prepare_megatron_lm(self, *args):\\n model = obj\\n elif isinstance(obj, (torch.optim.Optimizer)):\\n optimizer = obj\\n- elif isinstance(obj, (torch.optim.lr_scheduler._LRScheduler, MegatronLMDummyScheduler)):\\n+ elif isinstance(obj, (LRScheduler, MegatronLMDummyScheduler)):\\n scheduler = obj\\n \\n if model is not None:\\n@@ -1260,13 +1260,13 @@ def prepare_optimizer(self, optimizer: torch.optim.Optimizer, device_placement=N\\n self._optimizers.append(optimizer)\\n return optimizer\\n \\n- def prepare_scheduler(self, scheduler: torch.optim.lr_scheduler._LRScheduler):\\n+ def prepare_scheduler(self, scheduler: LRScheduler):\\n \"\"\"\\n Prepares a PyTorch Scheduler for training in any distributed setup. It is recommended to use\\n [`Accelerator.prepare`] instead.\\n \\n Args:\\n- scheduler (`torch.optim.lr_scheduler._LRScheduler`):\\n+ scheduler (`torch.optim.lr_scheduler.LRScheduler`):\\n A vanilla PyTorch scheduler to prepare\\n \"\"\"\\n # We try to find the optimizer associated with `scheduler`, the default is the full list.\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/884'},\n", - " 1132334450: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex a29794fc4..0d0fe0aa6 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -751,7 +751,7 @@ def _prepare_fsdp(self, *args):\\n for obj in args:\\n if isinstance(obj, torch.optim.Optimizer):\\n if len(obj.param_groups) > 1:\\n- logger.warn(\\n+ logger.warning(\\n \"FSDP Warning: When using FSDP, several parameter groups will be conflated into \"\\n \"a single one due to nested module wrapping and parameter flattening.\"\\n )\\n@@ -831,7 +831,7 @@ def prepare(self, *args, device_placement=None):\\n \"Then pass the optimizers to the prepare call in the same order as corresponding models.\"\\n )\\n elif model_count == 1 and optimizer_present:\\n- logger.warn(\\n+ logger.warning(\\n \"FSDP Warning: When using FSDP, \"\\n \"it is efficient and recommended to call prepare for the model before creating the optimizer\"\\n )\\n@@ -1713,7 +1713,7 @@ def load_state(self, input_dir: str):\\n err = \"Warning! 
Number of found checkpoints does not match the number of registered objects:\"\\n err += f\"\\\\n\\\\tFound checkpoints: {len(custom_checkpoints)}\"\\n err += f\"\\\\n\\\\tRegistered objects: {len(self._custom_objects)}\\\\nSkipping.\"\\n- logger.warn(err)\\n+ logger.warning(err)\\n else:\\n logger.info(f\"Loading in {len(custom_checkpoints)} custom states\")\\n for index, obj in enumerate(self._custom_objects):\\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex b2d95bad8..9e37bf0db 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -1085,7 +1085,7 @@ def launch_command(args):\\n message += (\\n \"\\\\nTo avoid this warning pass in values for each of the problematic parameters or run `accelerate config`.\"\\n )\\n- logger.warn(message)\\n+ logger.warning(message)\\n \\n # Use the proper launcher\\n if args.use_deepspeed and not args.cpu:\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/881'},\n", - " 1130077770: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex d6a323559..be366c6e3 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -95,6 +95,12 @@\\n if is_tpu_available(check_device=False):\\n import torch_xla.distributed.xla_multiprocessing as xmp\\n \\n+\\n+if is_torch_version(\"<=\", \"1.13.5\"):\\n+ from torch.optim.lr_scheduler import _LRScheduler as LRScheduler\\n+else:\\n+ from torch.optim.lr_scheduler import LRScheduler as LRScheduler\\n+\\n logger = get_logger(__name__)\\n \\n \\n@@ -725,7 +731,7 @@ def _prepare_one(self, obj, first_pass=False, device_placement=None):\\n optimizer = self.prepare_optimizer(obj, device_placement=device_placement)\\n return optimizer\\n # Second pass of preparation: LR scheduler (which need the full list of optimizers)\\n- elif isinstance(obj, torch.optim.lr_scheduler._LRScheduler):\\n+ elif isinstance(obj, LRScheduler):\\n scheduler = self.prepare_scheduler(obj)\\n return scheduler\\n # Return the unprocessed object if previous criteria was not met\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/878'},\n", - " 1129923134: {'diff': 'diff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\\nindex bb14078d4..442b63cf5 100644\\n--- a/src/accelerate/utils/dataclasses.py\\n+++ b/src/accelerate/utils/dataclasses.py\\n@@ -31,6 +31,7 @@\\n import torch\\n \\n from .constants import FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_STATE_DICT_TYPE, MODEL_NAME, OPTIMIZER_NAME\\n+from .versions import is_torch_version\\n \\n \\n class KwargsHandler:\\n@@ -526,9 +527,9 @@ def __post_init__(self):\\n from torch.distributed.fsdp.fully_sharded_data_parallel import (\\n BackwardPrefetch,\\n CPUOffload,\\n+ FullStateDictConfig,\\n ShardingStrategy,\\n StateDictType,\\n- _state_dict_type_to_config,\\n )\\n \\n if self.sharding_strategy is None:\\n@@ -549,12 +550,8 @@ def __post_init__(self):\\n state_dict_type_policy = os.environ.get(\"FSDP_STATE_DICT_TYPE\", \"FULL_STATE_DICT\")\\n self.state_dict_type = StateDictType(FSDP_STATE_DICT_TYPE.index(state_dict_type_policy) + 1)\\n 
\\n- if self.state_dict_type == StateDictType.FULL_STATE_DICT:\\n- self.state_dict_config = _state_dict_type_to_config[self.state_dict_type](\\n- offload_to_cpu=True, rank0_only=True\\n- )\\n- else:\\n- self.state_dict_config = _state_dict_type_to_config[self.state_dict_type]()\\n+ if self.state_dict_type == StateDictType.FULL_STATE_DICT and self.state_dict_config is None:\\n+ self.state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)\\n \\n @staticmethod\\n def get_module_class_from_name(module, name):\\n@@ -616,9 +613,14 @@ def save_model(self, accelerator, model, output_dir, model_index=0):\\n from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP\\n from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType\\n \\n- if self.state_dict_type == StateDictType.FULL_STATE_DICT:\\n+ if is_torch_version(\"<=\", \"1.13.5\"):\\n with FSDP.state_dict_type(model, self.state_dict_type, self.state_dict_config):\\n state_dict = model.state_dict()\\n+ else:\\n+ FSDP.set_state_dict_type(model, self.state_dict_type, self.state_dict_config)\\n+ state_dict = model.state_dict()\\n+\\n+ if self.state_dict_type == StateDictType.FULL_STATE_DICT:\\n weights_name = f\"{MODEL_NAME}.bin\" if model_index == 0 else f\"{MODEL_NAME}_{model_index}.bin\"\\n output_model_file = os.path.join(output_dir, weights_name)\\n if accelerator.process_index == 0:\\n@@ -626,8 +628,6 @@ def save_model(self, accelerator, model, output_dir, model_index=0):\\n torch.save(state_dict, output_model_file)\\n print(f\"Model saved to {output_model_file}\")\\n else:\\n- with FSDP.state_dict_type(model, self.state_dict_type, self.state_dict_config):\\n- state_dict = model.state_dict()\\n weights_name = (\\n f\"{MODEL_NAME}_rank{accelerator.process_index}.bin\"\\n if model_index == 0\\n@@ -660,7 +660,12 @@ def load_model(self, accelerator, model, input_dir, model_index=0):\\n print(f\"Loading model from {input_model_file}\")\\n state_dict = torch.load(input_model_file)\\n print(f\"Model loaded from {input_model_file}\")\\n- with FSDP.state_dict_type(model, self.state_dict_type, self.state_dict_config):\\n+\\n+ if is_torch_version(\"<=\", \"1.13.5\"):\\n+ with FSDP.state_dict_type(model, self.state_dict_type, self.state_dict_config):\\n+ model.load_state_dict(state_dict)\\n+ else:\\n+ FSDP.set_state_dict_type(model, self.state_dict_type, self.state_dict_config)\\n model.load_state_dict(state_dict)\\n \\n def save_optimizer(self, accelerator, optimizer, model, output_dir, optimizer_index=0, optim_input=None):\\ndiff --git a/tests/fsdp/test_fsdp.py b/tests/fsdp/test_fsdp.py\\nindex 249d2b692..e90106c17 100644\\n--- a/tests/fsdp/test_fsdp.py\\n+++ b/tests/fsdp/test_fsdp.py\\n@@ -95,7 +95,7 @@ def test_backward_prefetch(self):\\n self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))\\n \\n def test_state_dict_type(self):\\n- from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType, _state_dict_type_to_config\\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType\\n \\n for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):\\n env = self.dist_env.copy()\\n@@ -103,9 +103,6 @@ def test_state_dict_type(self):\\n with mockenv_context(**env):\\n fsdp_plugin = FullyShardedDataParallelPlugin()\\n self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))\\n- self.assertEqual(\\n- type(fsdp_plugin.state_dict_config), type(_state_dict_type_to_config[StateDictType(i + 1)]())\\n- )\\n if 
state_dict_type == \"FULL_STATE_DICT\":\\n self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)\\n self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)\\n',\n", - " 'code_comments': [{'body': '```suggestion\\r\\n if is_torch_version(\"<=\", \"1.13.5\"):\\r\\n```\\r\\nJust in case they make a patch release before 1.14 is out.',\n", - " 'diff_hunk': '@@ -616,18 +613,21 @@ def save_model(self, accelerator, model, output_dir, model_index=0):\\n from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP\\n from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType\\n \\n- if self.state_dict_type == StateDictType.FULL_STATE_DICT:\\n+ if is_torch_version(\"<=\", \"1.13.0\"):',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/877'},\n", - " 1128165008: {'diff': 'diff --git a/src/accelerate/big_modeling.py b/src/accelerate/big_modeling.py\\nindex e277e89a2..24857ad25 100644\\n--- a/src/accelerate/big_modeling.py\\n+++ b/src/accelerate/big_modeling.py\\n@@ -210,7 +210,8 @@ def dispatch_model(\\n device_map: Dict[str, Union[str, int, torch.device]],\\n main_device: Optional[torch.device] = None,\\n state_dict: Optional[Dict[str, torch.Tensor]] = None,\\n- offload_dir: Union[str, os.PathLike] = None,\\n+ offload_dir: Optional[Union[str, os.PathLike]] = None,\\n+ offload_index: Optional[Dict[str, str]] = None,\\n offload_buffers: bool = False,\\n preload_module_classes: Optional[List[str]] = None,\\n ):\\n@@ -231,6 +232,9 @@ def dispatch_model(\\n The state dict of the part of the model that will be kept on CPU.\\n offload_dir (`str` or `os.PathLike`):\\n The folder in which to offload the model weights (or where the model weights are already offloaded).\\n+ offload_index (`Dict`, *optional*):\\n+ A dictionary from weight name to their information (`dtype`/ `shape` or safetensors filename). 
Will default\\n+ to the index saved in `save_folder`.\\n offload_buffers (`bool`, *optional*, defaults to `False`):\\n Whether or not to offload the buffers with the model parameters.\\n preload_module_classes (`List[str]`, *optional*):\\n@@ -256,13 +260,15 @@ def dispatch_model(\\n state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules)\\n \\n disk_modules = [name for name, device in device_map.items() if device == \"disk\"]\\n- if offload_dir is None and len(disk_modules) > 0:\\n+ if offload_dir is None and offload_index is None and len(disk_modules) > 0:\\n raise ValueError(\\n \"We need an `offload_dir` to dispatch this model according to this `device_map`, the following submodules \"\\n f\"need to be offloaded: {\\', \\'.join(disk_modules)}.\"\\n )\\n- if len(disk_modules) > 0 and (\\n- not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, \"index.json\"))\\n+ if (\\n+ len(disk_modules) > 0\\n+ and offload_index is None\\n+ and (not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, \"index.json\")))\\n ):\\n disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules)\\n offload_state_dict(offload_dir, disk_state_dict)\\n@@ -273,8 +279,11 @@ def dispatch_model(\\n offloaded_devices = [\"disk\"] if main_device == \"cpu\" else [\"cpu\", \"disk\"]\\n offload = {name: device in offloaded_devices for name, device in device_map.items()}\\n save_folder = offload_dir if len(disk_modules) > 0 else None\\n- if state_dict is not None or save_folder is not None:\\n- weights_map = OffloadedWeightsLoader(state_dict=state_dict, save_folder=save_folder)\\n+ if state_dict is not None or save_folder is not None or offload_index is not None:\\n+ device = main_device if offload_index is not None else None\\n+ weights_map = OffloadedWeightsLoader(\\n+ state_dict=state_dict, save_folder=save_folder, index=offload_index, device=device\\n+ )\\n else:\\n weights_map = None\\n \\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\\nindex 87f891021..f46bce267 100644\\n--- a/src/accelerate/utils/__init__.py\\n+++ b/src/accelerate/utils/__init__.py\\n@@ -34,6 +34,7 @@\\n is_megatron_lm_available,\\n is_mlflow_available,\\n is_rich_available,\\n+ is_safetensors_available,\\n is_sagemaker_available,\\n is_tensorboard_available,\\n is_tpu_available,\\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\\nindex bfedee162..72d6202cd 100644\\n--- a/src/accelerate/utils/imports.py\\n+++ b/src/accelerate/utils/imports.py\\n@@ -101,6 +101,10 @@ def is_megatron_lm_available():\\n return False\\n \\n \\n+def is_safetensors_available():\\n+ return importlib.util.find_spec(\"safetensors\") is not None\\n+\\n+\\n def is_transformers_available():\\n return importlib.util.find_spec(\"transformers\") is not None\\n \\ndiff --git a/src/accelerate/utils/offload.py b/src/accelerate/utils/offload.py\\nindex bc0fcdef1..22f51cce1 100644\\n--- a/src/accelerate/utils/offload.py\\n+++ b/src/accelerate/utils/offload.py\\n@@ -20,6 +20,12 @@\\n import numpy as np\\n import torch\\n \\n+from ..logging import get_logger\\n+from .imports import is_safetensors_available\\n+\\n+\\n+logger = get_logger(__name__)\\n+\\n \\n def offload_weight(weight, weight_name, offload_folder, index=None):\\n dtype = None\\n@@ -131,8 +137,8 @@ class OffloadedWeightsLoader(Mapping):\\n save_folder (`str` or `os.PathLike`, *optional*):\\n The directory in which the weights are stored (by `offload_state_dict` for 
instance).\\n index (`Dict`, *optional*):\\n- A dictionary from weight name to their information (`dtype` and `shape`). Will default to the index saved\\n- in `save_folder`.\\n+ A dictionary from weight name to their information (`dtype`/ `shape` or safetensors filename). Will default\\n+ to the index saved in `save_folder`.\\n \"\"\"\\n \\n def __init__(\\n@@ -140,6 +146,7 @@ def __init__(\\n state_dict: Dict[str, torch.Tensor] = None,\\n save_folder: Optional[Union[str, os.PathLike]] = None,\\n index: Mapping = None,\\n+ device=None,\\n ):\\n if state_dict is None and save_folder is None:\\n raise ValueError(\"Need either a `state_dict` or a `save_folder` containing offloaded weights.\")\\n@@ -152,12 +159,32 @@ def __init__(\\n self.index = {} if index is None else index\\n self.all_keys = list(self.state_dict.keys())\\n self.all_keys.extend([key for key in self.index if key not in self.all_keys])\\n+ self.device = device\\n \\n def __getitem__(self, key: str):\\n # State dict gets priority\\n if key in self.state_dict:\\n return self.state_dict[key]\\n weight_info = self.index[key]\\n+ if weight_info.get(\"safetensors_file\") is not None:\\n+ if not is_safetensors_available():\\n+ raise ImportError(\"These offloaded weights require the use of safetensors: `pip install safetensors`.\")\\n+\\n+ if \"SAFETENSORS_FAST_GPU\" not in os.environ:\\n+ logger.info(\"Enabling fast loading with safetensors by setting `SAFETENSORS_FAST_GPU` to 1.\")\\n+ os.environ[\"SAFETENSORS_FAST_GPU\"] = \"1\"\\n+\\n+ from safetensors import safe_open\\n+\\n+ device = \"cpu\" if self.device is None else self.device\\n+ with safe_open(weight_info[\"safetensors_file\"], framework=\"pt\", device=device) as f:\\n+ tensor = f.get_tensor(weight_info.get(\"weight_name\", key))\\n+\\n+ if \"dtype\" in weight_info:\\n+ return tensor.to(getattr(torch, weight_info[\"dtype\"]))\\n+ else:\\n+ return tensor\\n+\\n weight_file = os.path.join(self.save_folder, f\"{key}.dat\")\\n return load_offloaded_weight(weight_file, weight_info)\\n \\n',\n", - " 'code_comments': [{'body': 'We need a guard here for `is_safetensors_available` and raise an error if not :) (aka make one :) )',\n", - " 'diff_hunk': '@@ -152,12 +153,25 @@ def __init__(\\n self.index = {} if index is None else index\\n self.all_keys = list(self.state_dict.keys())\\n self.all_keys.extend([key for key in self.index if key not in self.all_keys])\\n+ self.device = device\\n \\n def __getitem__(self, key: str):\\n # State dict gets priority\\n if key in self.state_dict:\\n return self.state_dict[key]\\n weight_info = self.index[key]\\n+ if weight_info.get(\"safetensors_file\") is not None:\\n+ from safetensors import safe_open',\n", - " 'from_author': False},\n", - " {'body': 'Indeed, will clean that up on Monday!',\n", - " 'diff_hunk': '@@ -152,12 +153,25 @@ def __init__(\\n self.index = {} if index is None else index\\n self.all_keys = list(self.state_dict.keys())\\n self.all_keys.extend([key for key in self.index if key not in self.all_keys])\\n+ self.device = device\\n \\n def __getitem__(self, key: str):\\n # State dict gets priority\\n if key in self.state_dict:\\n return self.state_dict[key]\\n weight_info = self.index[key]\\n+ if weight_info.get(\"safetensors_file\") is not None:\\n+ from safetensors import safe_open',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 
'https://api.github.com/repos/huggingface/accelerate/pulls/873'},\n", - " 1128136442: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex d6a323559..d614b47e2 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -926,6 +926,7 @@ def prepare_model(self, model: torch.nn.Module, device_placement=None):\\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\\n model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)\\n if self.native_amp:\\n+ model._original_forward = model.forward\\n if self.mixed_precision == \"fp16\" and is_torch_version(\">=\", \"1.10\"):\\n model.forward = torch.cuda.amp.autocast(dtype=torch.float16)(model.forward)\\n elif self.mixed_precision == \"bf16\" and self.distributed_type != DistributedType.TPU:\\ndiff --git a/src/accelerate/utils/operations.py b/src/accelerate/utils/operations.py\\nindex 7ba4482cc..3ebea2eef 100644\\n--- a/src/accelerate/utils/operations.py\\n+++ b/src/accelerate/utils/operations.py\\n@@ -17,6 +17,7 @@\\n \"\"\"\\n \\n \\n+import pickle\\n from functools import update_wrapper\\n from typing import Any, Mapping\\n \\n@@ -473,8 +474,6 @@ class ConvertOutputsToFp32:\\n Decorator to apply to a function outputing tensors (like a model forward pass) that ensures the outputs in FP16\\n precision will be convert back to FP32.\\n \\n- Use a class instead of a decorator because otherwise, the prepared model can no longer be pickled (issue #273).\\n-\\n Args:\\n model_forward (`Callable`):\\n The function which outputs we want to treat.\\n@@ -490,6 +489,11 @@ def __init__(self, model_forward):\\n def __call__(self, *args, **kwargs):\\n return convert_to_fp32(self.model_forward(*args, **kwargs))\\n \\n+ def __getstate__(self):\\n+ raise pickle.PicklingError(\\n+ \"Cannot pickle a prepared model with automatic mixed precision, please unwrap the model with `Accelerator.unwrap_model(model)` before pickling it.\"\\n+ )\\n+\\n \\n convert_outputs_to_fp32 = ConvertOutputsToFp32\\n \\ndiff --git a/src/accelerate/utils/other.py b/src/accelerate/utils/other.py\\nindex a4a7b6312..1eb7cca88 100644\\n--- a/src/accelerate/utils/other.py\\n+++ b/src/accelerate/utils/other.py\\n@@ -21,7 +21,6 @@\\n from ..state import AcceleratorState\\n from .dataclasses import DistributedType\\n from .imports import is_deepspeed_available, is_tpu_available\\n-from .operations import ConvertOutputsToFp32\\n \\n \\n if is_deepspeed_available():\\n@@ -53,8 +52,13 @@ def extract_model_from_parallel(model, keep_fp32_wrapper: bool = False):\\n \\n if not keep_fp32_wrapper:\\n forward = getattr(model, \"forward\")\\n- if isinstance(forward, ConvertOutputsToFp32):\\n- setattr(model, \"forward\", forward.model_forward)\\n+ original_forward = model.__dict__.pop(\"_original_forward\", None)\\n+ if original_forward is not None:\\n+ while hasattr(forward, \"__wrapped__\"):\\n+ forward = forward.__wrapped__\\n+ if forward == original_forward:\\n+ break\\n+ model.forward = forward\\n return model\\n \\n \\ndiff --git a/tests/test_utils.py b/tests/test_utils.py\\nindex 1e9d18c17..7c7629a1a 100644\\n--- a/tests/test_utils.py\\n+++ b/tests/test_utils.py\\n@@ -19,8 +19,15 @@\\n \\n import torch\\n \\n+from accelerate.test_utils.testing import require_cuda\\n from accelerate.test_utils.training import RegressionModel\\n-from accelerate.utils import convert_outputs_to_fp32, find_device, patch_environment, send_to_device\\n+from accelerate.utils import (\\n+ convert_outputs_to_fp32,\\n+ 
extract_model_from_parallel,\\n+ find_device,\\n+ patch_environment,\\n+ send_to_device,\\n+)\\n \\n \\n ExampleNamedTuple = namedtuple(\"ExampleNamedTuple\", \"a b c\")\\n@@ -74,9 +81,20 @@ def test_patch_environment(self):\\n self.assertNotIn(\"AA\", os.environ)\\n self.assertNotIn(\"BB\", os.environ)\\n \\n- def test_convert_to_32_lets_model_pickle(self):\\n+ def test_can_undo_convert_outputs(self):\\n model = RegressionModel()\\n+ model._original_forward = model.forward\\n model.forward = convert_outputs_to_fp32(model.forward)\\n+ model = extract_model_from_parallel(model)\\n+ _ = pickle.dumps(model)\\n+\\n+ @require_cuda\\n+ def test_can_undo_fp16_conversion(self):\\n+ model = RegressionModel()\\n+ model._original_forward = model.forward\\n+ model.forward = torch.cuda.amp.autocast(dtype=torch.float16)(model.forward)\\n+ model.forward = convert_outputs_to_fp32(model.forward)\\n+ model = extract_model_from_parallel(model)\\n _ = pickle.dumps(model)\\n \\n def test_find_device(self):\\n',\n", - " 'code_comments': [{'body': \"@sgugger I know how you feel about `_` but I felt this was a decent time for one because the user really *shouldn't* need to know this exists at all or try to accidentally call it in any way\",\n", - " 'diff_hunk': '@@ -926,6 +926,7 @@ def prepare_model(self, model: torch.nn.Module, device_placement=None):\\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\\n model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)\\n if self.native_amp:\\n+ model._original_forward = model.forward',\n", - " 'from_author': True},\n", - " {'body': 'Agree in this case :-)',\n", - " 'diff_hunk': '@@ -926,6 +926,7 @@ def prepare_model(self, model: torch.nn.Module, device_placement=None):\\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\\n model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)\\n if self.native_amp:\\n+ model._original_forward = model.forward',\n", - " 'from_author': False},\n", - " {'body': 'Are we sure this does not make a regression with this issue?',\n", - " 'diff_hunk': '@@ -468,13 +468,11 @@ def _is_fp16_bf16_tensor(tensor):\\n return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)\\n \\n \\n-class ConvertOutputsToFp32:\\n+def convert_outputs_to_fp32(model_forward):\\n \"\"\"\\n Decorator to apply to a function outputing tensors (like a model forward pass) that ensures the outputs in FP16\\n precision will be convert back to FP32.\\n \\n- Use a class instead of a decorator because otherwise, the prepared model can no longer be pickled (issue #273).\\n-',\n", - " 'from_author': False},\n", - " {'body': \"The issue will always exist because it can never be pickled if we've wrapped it in a mixed precision decorator. So it will always have to go through `unwrap_model`. And this only ever comes up if we do mixed precision. See the nb shown in the bug report here: https://colab.research.google.com/drive/11fvrk1Jslw2VIRTF6h0pdGgJx5AkAIMv?usp=sharing\\r\\n\\r\\nWhere specifically our explicit wrapping around `torch.cuda.amp` causes issues, and technically was the original issue too.\\r\\n\\r\\nOr, phrasing it another way, the original issue doesn't come from Accelerate actually it comes from torch and using the autocast decorator. To keep things consistent this just makes them all use decorators so that we can quickly just unwrap all of them. 
\",\n", - " 'diff_hunk': '@@ -468,13 +468,11 @@ def _is_fp16_bf16_tensor(tensor):\\n return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)\\n \\n \\n-class ConvertOutputsToFp32:\\n+def convert_outputs_to_fp32(model_forward):\\n \"\"\"\\n Decorator to apply to a function outputing tensors (like a model forward pass) that ensures the outputs in FP16\\n precision will be convert back to FP32.\\n \\n- Use a class instead of a decorator because otherwise, the prepared model can no longer be pickled (issue #273).\\n-',\n", - " 'from_author': True},\n", - " {'body': \"I can try to see if it's possible to inject a warning before trying to pickle/save it and tell the user they need to unwrap the model first. \",\n", - " 'diff_hunk': '@@ -468,13 +468,11 @@ def _is_fp16_bf16_tensor(tensor):\\n return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)\\n \\n \\n-class ConvertOutputsToFp32:\\n+def convert_outputs_to_fp32(model_forward):\\n \"\"\"\\n Decorator to apply to a function outputing tensors (like a model forward pass) that ensures the outputs in FP16\\n precision will be convert back to FP32.\\n \\n- Use a class instead of a decorator because otherwise, the prepared model can no longer be pickled (issue #273).\\n-',\n", - " 'from_author': True},\n", - " {'body': \"Let's put your comment on the PR here as well, to explain what this code is doing 🙏 \",\n", - " 'diff_hunk': '@@ -53,8 +52,13 @@ def extract_model_from_parallel(model, keep_fp32_wrapper: bool = False):\\n \\n if not keep_fp32_wrapper:\\n forward = getattr(model, \"forward\")\\n- if isinstance(forward, ConvertOutputsToFp32):\\n- setattr(model, \"forward\", forward.model_forward)\\n+ original_forward = model.__dict__.pop(\"_original_forward\", None)\\n+ if original_forward is not None:\\n+ while hasattr(forward, \"__wrapped__\"):\\n+ forward = forward.__wrapped__\\n+ if forward == original_forward:\\n+ break',\n", - " 'from_author': False},\n", - " {'body': \"A custom `__getstate__` func let's us raise an error when the user tries to prepare a model that had automatic mixed precision performed, ensuring with a clear error what the proper behavior needs to be, and the user doesn't have to know that this is due to PyTorch *and* accelerate and their behaviors\",\n", - " 'diff_hunk': '@@ -490,6 +488,11 @@ def __init__(self, model_forward):\\n def __call__(self, *args, **kwargs):\\n return convert_to_fp32(self.model_forward(*args, **kwargs))\\n \\n+ def __getstate__(self):\\n+ raise pickle.PicklingError(\\n+ \"Cannot pickle a prepared model with automatic mixed precision, please unwrap the model with `Accelerator.unwrap_model(model)` before pickling it.\"\\n+ )\\n+',\n", - " 'from_author': True},\n", - " {'body': 'When a function is wrapped using functools (such that we have `torch.cuda.amp.autocast(dtype=torch.float16)(model.forward)` the wrapped function (the original `model.forward`) exists in the `__wrapped__` attribute. 
When using `functools.update_wrapper` (such as in `ConvertOutputsToFp32`) this will also in turn add a new `__wrapped__` function, allowing us to follow the chain of `__wrapped__` until we return to the original model forward function, which was stored away earlier.',\n", - " 'diff_hunk': '@@ -53,8 +52,13 @@ def extract_model_from_parallel(model, keep_fp32_wrapper: bool = False):\\n \\n if not keep_fp32_wrapper:\\n forward = getattr(model, \"forward\")\\n- if isinstance(forward, ConvertOutputsToFp32):\\n- setattr(model, \"forward\", forward.model_forward)\\n+ original_forward = model.__dict__.pop(\"_original_forward\", None)\\n+ if original_forward is not None:\\n+ while hasattr(forward, \"__wrapped__\"):\\n+ forward = forward.__wrapped__\\n+ if forward == original_forward:\\n+ break',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'Some notes on the final version (which will also be trickled into the code of the PR):\\r\\n\\r\\n- I kept this as a class so that we can implement a custom `__getstate__` which will return an explicit pickling error if someone tries to pickle a wrapped model with automatic mixed precision on it\\r\\n- When a function is wrapped using functools (such that we have `torch.cuda.amp.autocast(dtype=torch.float16)(model.forward)` the wrapped function (the original `model.forward`) exists in the `__wrapped__` attribute. When using `functools.update_wrapper` (such as in `ConvertOutputsToFp32`) this will also in turn add a new `__wrapped__` function, allowing us to follow the chain of `__wrapped__` until we return to the original model forward function, which was stored away earlier. ',\n", - " 'from_author': True}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/872'},\n", - " 1127844339: {'diff': 'diff --git a/src/accelerate/logging.py b/src/accelerate/logging.py\\nindex 2eceb223e..679f2da5c 100644\\n--- a/src/accelerate/logging.py\\n+++ b/src/accelerate/logging.py\\n@@ -78,7 +78,8 @@ def get_logger(name: str, log_level: str = None):\\n ```\\n \"\"\"\\n if log_level is None:\\n- log_level = os.environ.get(\"ACCELERATE_LOG_LEVEL\", \"WARNING\")\\n+ log_level = os.environ.get(\"ACCELERATE_LOG_LEVEL\", None)\\n logger = logging.getLogger(name)\\n- logging.basicConfig(level=log_level.upper())\\n+ if log_level is not None:\\n+ logger.setLevel(log_level.upper())\\n return MultiProcessAdapter(logger, {})\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/871'},\n", - " 1126820335: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex 6ac8d8d3a..d6a323559 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -953,7 +953,9 @@ def _prepare_deepspeed(self, *args):\\n batch_sizes = [batch_size // self.num_processes for batch_size in batch_sizes]\\n if len(batch_sizes) == 0:\\n raise ValueError(\\n- \"You must specify a training or evaluation dataloader in `accelerate.prepare()` when using DeepSpeed.\"\\n+ \"When using DeepSpeed `accelerate.prepare()` requires you to pass at least one of training or evaluation dataloaders \"\\n+ \"or alternatively set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file\"\\n+ \"or assign 
integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config[\\'train_micro_batch_size_per_gpu\\']`.\"\\n )\\n \\n batch_size_per_device = min(batch_sizes) if deepspeed_plugin.is_train_batch_min else max(batch_sizes)\\ndiff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py\\nindex 1be794421..86c9766a1 100644\\n--- a/tests/deepspeed/test_deepspeed.py\\n+++ b/tests/deepspeed/test_deepspeed.py\\n@@ -350,7 +350,9 @@ def test_prepare_deepspeed(self, optim_type, scheduler_type):\\n with self.assertRaises(ValueError) as cm:\\n model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)\\n self.assertTrue(\\n- \"You must specify a training or evaluation dataloader in `accelerate.prepare()` when using DeepSpeed.\"\\n+ \"When using DeepSpeed `accelerate.prepare()` requires you to pass at least one of training or evaluation dataloaders \"\\n+ \"or alternatively set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file\"\\n+ \"or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config[\\'train_micro_batch_size_per_gpu\\']`.\"\\n in str(cm.exception)\\n )\\n \\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/868'},\n", - " 1126156577: {'diff': 'diff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\\nindex a653b9d6a..a3dcd2dcb 100644\\n--- a/src/accelerate/commands/config/cluster.py\\n+++ b/src/accelerate/commands/config/cluster.py\\n@@ -63,21 +63,21 @@ def get_cluster_input():\\n if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_CPU]:\\n num_machines = _ask_field(\\n \"How many different machines will you use (use more than 1 for multi-node training)? [1]: \",\\n- lambda x: int(x),\\n+ int,\\n default=1,\\n )\\n if num_machines > 1:\\n machine_rank = _ask_options(\\n \"What is the rank of this machine?\",\\n list(range(num_machines)),\\n- lambda x: int(x),\\n+ int,\\n )\\n main_process_ip = _ask_field(\\n \"What is the IP address of the machine that will host the main process? \",\\n )\\n main_process_port = _ask_field(\\n \"What is the port you will use to communicate with the main process? \",\\n- lambda x: int(x),\\n+ int,\\n )\\n same_network = _ask_field(\\n \"Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: \",\\n@@ -153,14 +153,14 @@ def get_cluster_input():\\n if use_deepspeed_config:\\n deepspeed_config[\"deepspeed_config_file\"] = _ask_field(\\n \"Please enter the path to the json DeepSpeed config file: \",\\n- lambda x: str(x),\\n+ str,\\n default=\"none\",\\n )\\n else:\\n deepspeed_config[\"zero_stage\"] = _ask_options(\\n \"What should be your DeepSpeed\\'s ZeRO optimization stage?\",\\n [0, 1, 2, 3],\\n- lambda x: int(x),\\n+ int,\\n default=2,\\n )\\n \\n@@ -174,7 +174,7 @@ def get_cluster_input():\\n )\\n deepspeed_config[\"gradient_accumulation_steps\"] = _ask_field(\\n \"How many gradient accumulation steps you\\'re passing in your script? [1]: \",\\n- lambda x: int(x),\\n+ int,\\n default=1,\\n )\\n use_gradient_clipping = _ask_field(\\n@@ -186,7 +186,7 @@ def get_cluster_input():\\n if use_gradient_clipping:\\n deepspeed_config[\"gradient_clipping\"] = _ask_field(\\n \"What is the gradient clipping value? 
[1.0]: \",\\n- lambda x: float(x),\\n+ float,\\n default=1.0,\\n )\\n if deepspeed_config[\"zero_stage\"] == 3:\\n@@ -224,7 +224,7 @@ def get_cluster_input():\\n \"for more information please refer official [documentation]\"\\n \"(https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node). \"\\n \"Please specify the location of hostfile: \",\\n- lambda x: str(x),\\n+ str,\\n )\\n \\n is_exclusion_filter = _ask_field(\\n@@ -236,7 +236,7 @@ def get_cluster_input():\\n if is_exclusion_filter:\\n deepspeed_config[\"deepspeed_exclusion_filter\"] = _ask_field(\\n \"DeepSpeed exclusion filter string: \",\\n- lambda x: str(x),\\n+ str,\\n )\\n \\n is_inclusion_filter = _ask_field(\\n@@ -248,7 +248,7 @@ def get_cluster_input():\\n if is_inclusion_filter:\\n deepspeed_config[\"deepspeed_inclusion_filter\"] = _ask_field(\\n \"DeepSpeed inclusion filter string: \",\\n- lambda x: str(x),\\n+ str,\\n )\\n \\n fsdp_config = {}\\n@@ -284,12 +284,12 @@ def get_cluster_input():\\n if fsdp_config[\"fsdp_auto_wrap_policy\"] == FSDP_AUTO_WRAP_POLICY[0]:\\n fsdp_config[\"fsdp_transformer_layer_cls_to_wrap\"] = _ask_field(\\n \"What is the transformer layer class name (case-sensitive) to wrap ,e.g, `BertLayer`, `GPTJBlock`, `T5Block` ...? : \",\\n- lambda x: str(x),\\n+ str,\\n )\\n elif fsdp_config[\"fsdp_auto_wrap_policy\"] == FSDP_AUTO_WRAP_POLICY[1]:\\n fsdp_config[\"fsdp_min_num_params\"] = _ask_field(\\n \"What should be your FSDP\\'s minimum number of parameters for Default Auto Wrapping Policy? [1e8]: \",\\n- lambda x: int(x),\\n+ int,\\n default=1e8,\\n )\\n fsdp_backward_prefetch_query = \"What should be your FSDP\\'s backward prefetch policy?\"\\n@@ -319,7 +319,7 @@ def get_cluster_input():\\n prefix = \"megatron_lm_\"\\n megatron_lm_config[prefix + \"tp_degree\"] = _ask_field(\\n \"What is the Tensor Parallelism degree/size? [1]:\",\\n- lambda x: int(x),\\n+ int,\\n default=1,\\n error_message=\"Please enter an integer.\",\\n )\\n@@ -333,14 +333,14 @@ def get_cluster_input():\\n \\n megatron_lm_config[prefix + \"pp_degree\"] = _ask_field(\\n \"What is the Pipeline Parallelism degree/size? [1]:\",\\n- lambda x: int(x),\\n+ int,\\n default=1,\\n error_message=\"Please enter an integer.\",\\n )\\n if megatron_lm_config[prefix + \"pp_degree\"] > 1:\\n megatron_lm_config[prefix + \"num_micro_batches\"] = _ask_field(\\n \"What is the number of micro-batches? [1]:\",\\n- lambda x: int(x),\\n+ int,\\n default=1,\\n error_message=\"Please enter an integer.\",\\n )\\n@@ -362,7 +362,7 @@ def get_cluster_input():\\n \\n megatron_lm_config[prefix + \"gradient_clipping\"] = _ask_field(\\n \"What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]: \",\\n- lambda x: float(x),\\n+ float,\\n default=1.0,\\n )\\n \\n@@ -438,14 +438,14 @@ def get_cluster_input():\\n machine_type += \"(s)\"\\n num_processes = _ask_field(\\n f\"How many {machine_type} should be used for distributed training? [1]:\",\\n- lambda x: int(x),\\n+ int,\\n default=1,\\n error_message=\"Please enter an integer.\",\\n )\\n elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:\\n num_processes = _ask_field(\\n \"How many GPU(s) should be used for distributed training? 
[1]:\",\\n- lambda x: int(x),\\n+ int,\\n default=1,\\n error_message=\"Please enter an integer.\",\\n )\\ndiff --git a/src/accelerate/commands/config/sagemaker.py b/src/accelerate/commands/config/sagemaker.py\\nindex 22264b41f..af4195f29 100644\\n--- a/src/accelerate/commands/config/sagemaker.py\\n+++ b/src/accelerate/commands/config/sagemaker.py\\n@@ -97,7 +97,7 @@ def get_sagemaker_input():\\n credentials_configuration = _ask_options(\\n \"How do you want to authorize?\",\\n [\"AWS Profile\", \"Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) \"],\\n- lambda x: int(x),\\n+ int,\\n )\\n aws_profile = None\\n if credentials_configuration == 0:\\n@@ -120,7 +120,7 @@ def get_sagemaker_input():\\n role_management = _ask_options(\\n \"Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?\",\\n [\"Provide IAM Role name\", \"Create new IAM role using credentials\"],\\n- lambda x: int(x),\\n+ int,\\n )\\n if role_management == 0:\\n iam_role_name = _ask_field(\"Enter your IAM role name: \")\\n@@ -206,13 +206,10 @@ def get_sagemaker_input():\\n ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default=\"ml.p3.2xlarge\")\\n \\n num_machines = 1\\n- if (\\n- distributed_type == SageMakerDistributedType.DATA_PARALLEL\\n- or distributed_type == SageMakerDistributedType.MODEL_PARALLEL\\n- ):\\n+ if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):\\n num_machines = _ask_field(\\n \"How many machines do you want use? [1]: \",\\n- lambda x: int(x),\\n+ int,\\n default=1,\\n )\\n \\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex 8eefb9d7f..b2d95bad8 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -863,7 +863,7 @@ def _infer_type(s):\\n parser.add_argument(argument, action=action)\\n \\n return {\\n- key: (literal_eval(value) if value == \"True\" or value == \"False\" else value)\\n+ key: (literal_eval(value) if value in (\"True\", \"False\") else value)\\n for key, value in parser.parse_args(nargs).__dict__.items()\\n }\\n \\ndiff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\\nindex 4f3930e1c..8a45856cd 100644\\n--- a/src/accelerate/data_loader.py\\n+++ b/src/accelerate/data_loader.py\\n@@ -13,6 +13,7 @@\\n # limitations under the License.\\n \\n import math\\n+from contextlib import suppress\\n from typing import List, Optional, Union\\n \\n import torch\\n@@ -364,12 +365,10 @@ def __iter__(self):\\n if self.rng_types is not None:\\n synchronize_rng_states(self.rng_types, self.synchronized_generator)\\n self.gradient_state._set_end_of_dataloader(False)\\n- try:\\n+ # We can safely pass because the default is -1\\n+ with suppress(Exception):\\n length = getattr(self.dataset, \"total_dataset_length\", len(self.dataset))\\n self.gradient_state._set_remainder(length % self.total_batch_size)\\n- except Exception:\\n- # We can safely pass because the default is -1\\n- pass\\n dataloader_iter = super().__iter__()\\n # We iterate one batch ahead to check when we are at the end\\n try:\\n@@ -448,12 +447,10 @@ def __init__(self, dataset, split_batches: bool = False, _drop_last: bool = Fals\\n self.gradient_state = GradientState()\\n self.state = AcceleratorState()\\n self._drop_last = _drop_last\\n- try:\\n+ # We can safely pass because the default is -1\\n+ with suppress(Exception):\\n length = getattr(self.dataset, \"total_dataset_length\", len(self.dataset))\\n 
self.gradient_state._set_remainder(length % self.total_batch_size)\\n- except Exception:\\n- # We can safely pass because the default is -1\\n- pass\\n \\n def _fetch_batches(self, iterator):\\n batches, batch = None, None\\ndiff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\\nindex 689ee5033..75694ba72 100644\\n--- a/src/accelerate/tracking.py\\n+++ b/src/accelerate/tracking.py\\n@@ -593,7 +593,7 @@ def filter_trackers(\\n if getattr(tracker_init, \"requires_logging_directory\"):\\n if logging_dir is None:\\n raise ValueError(\\n- f\"Logging with `{str(log_type)}` requires a `logging_dir` to be passed in.\"\\n+ f\"Logging with `{log_type}` requires a `logging_dir` to be passed in.\"\\n )\\n loggers.append(log_type)\\n else:\\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\\nindex bb14078d4..50013c4d5 100644\\n--- a/src/accelerate/utils/dataclasses.py\\n+++ b/src/accelerate/utils/dataclasses.py\\n@@ -228,7 +228,7 @@ def __str__(self):\\n @classmethod\\n def list(cls):\\n \"Method to list all the possible items in `cls`\"\\n- return list(map(lambda item: str(item), cls))\\n+ return list(map(str, cls))\\n \\n \\n class LoggerType(BaseEnum):\\ndiff --git a/src/accelerate/utils/launch.py b/src/accelerate/utils/launch.py\\nindex c65df08dc..1a4dcb4eb 100644\\n--- a/src/accelerate/utils/launch.py\\n+++ b/src/accelerate/utils/launch.py\\n@@ -88,7 +88,7 @@ def __call__(self, index, *args):\\n store=torch.distributed.FileStore(rdv_file, world_size),\\n world_size=world_size,\\n )\\n- elif self.distributed_type == DistributedType.MULTI_GPU or self.distributed_type == DistributedType.MULTI_CPU:\\n+ elif self.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):\\n # Prepare the environment for torch.distributed\\n os.environ[\"LOCAL_RANK\"] = str(index)\\n os.environ[\"RANK\"] = str(index)\\ndiff --git a/src/accelerate/utils/megatron_lm.py b/src/accelerate/utils/megatron_lm.py\\nindex afee2fcca..ab3bd8c37 100644\\n--- a/src/accelerate/utils/megatron_lm.py\\n+++ b/src/accelerate/utils/megatron_lm.py\\n@@ -132,7 +132,7 @@ def prepare_model(accelerator):\\n custom_model_provider_func = accelerator.state.megatron_lm_plugin.custom_model_provider_function\\n model = accelerator.state.megatron_lm_plugin.custom_prepare_model_function(custom_model_provider_func)\\n else:\\n- if args.model_type_name == \"bert\" or args.model_type_name == \"gpt\":\\n+ if args.model_type_name in (\"bert\", \"gpt\"):\\n model_type = ModelType.encoder_or_decoder\\n elif args.model_type_name == \"t5\":\\n model_type = ModelType.encoder_and_decoder\\n@@ -566,7 +566,7 @@ def loss_func_finetune(labels, logits):\\n # We are doing regression\\n loss_fct = MSELoss()\\n loss = loss_fct(logits.view(-1), labels.view(-1))\\n- elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\\n+ elif self.num_labels > 1 and (labels.dtype in (torch.long, torch.int)):\\n loss_fct = CrossEntropyLoss()\\n loss = loss_fct(logits.view(-1, num_labels), labels.view(-1))\\n else:\\n',\n", - " 'code_comments': [{'body': \"Disagree with this change here. 
It's more readable to explicitly have the `.keys()`.\",\n", - " 'diff_hunk': '@@ -98,7 +98,7 @@ def __call__(self, parser, namespace, values, option_string=None):\\n \"optional arguments\",\\n ]\\n if len(args) > 1:\\n- used_platforms = [arg for arg in args if arg in options_to_group.keys()]\\n+ used_platforms = [arg for arg in args if arg in options_to_group]',\n", - " 'from_author': False},\n", - " {'body': 'Not convinced this is more readable.',\n", - " 'diff_hunk': '@@ -245,7 +245,7 @@ def dispatch_model(\\n check_device_map(model, device_map)\\n \\n if main_device is None:\\n- if set(device_map.values()) == {\"cpu\"} or set(device_map.values()) == {\"cpu\", \"disk\"}:\\n+ if set(device_map.values()) in ({\"cpu\"}, {\"cpu\", \"disk\"}):',\n", - " 'from_author': False},\n", - " {'body': 'While this is technically equivalent, I prefer the explicit `else`.',\n", - " 'diff_hunk': '@@ -77,8 +77,7 @@ def handle_input(cls):\\n if handler:\\n cls.current_selection = char\\n return handler(cls)\\n- else:\\n- return None\\n+ return None',\n", - " 'from_author': False},\n", - " {'body': 'I prefer the explicit `else`.',\n", - " 'diff_hunk': '@@ -164,9 +165,8 @@ def __len__(self):\\n elif self.even_batches:\\n # When we even batches we always get +1\\n return length + 1\\n- else:\\n- # Otherwise it depends on the process index.\\n- return length + 1 if self.process_index < len(self.batch_sampler) % self.num_processes else length\\n+ # Otherwise it depends on the process index.\\n+ return length + 1 if self.process_index < len(self.batch_sampler) % self.num_processes else length',\n", - " 'from_author': False},\n", - " {'body': 'Same here.',\n", - " 'diff_hunk': '@@ -402,8 +400,7 @@ def total_batch_size(self):\\n def total_dataset_length(self):\\n if hasattr(\"total_length\", self.dataset):\\n return self.dataset.total_length\\n- else:\\n- return len(self.dataset)\\n+ return len(self.dataset)',\n", - " 'from_author': False},\n", - " {'body': 'Same here.',\n", - " 'diff_hunk': '@@ -548,8 +543,7 @@ def __len__(self):\\n return whole_length\\n elif self._drop_last:\\n return whole_length // self.state.num_processes\\n- else:\\n- return math.ceil(whole_length / self.state.num_processes)\\n+ return math.ceil(whole_length / self.state.num_processes)',\n", - " 'from_author': False},\n", - " {'body': \"While this is equivalent, `del values[names]` is explicit about what it's doing. `values.pop(name)` is not, so this should really be reverted.\",\n", - " 'diff_hunk': '@@ -500,7 +500,7 @@ def store_init_configuration(self, values: dict):\\n f\\'Trainer is attempting to log a value of \"{value}\" for key \"{name}\" as a parameter. 
MLflow\\\\\\'s\\'\\n f\" log_param() only accepts values no longer than {mlflow.utils.validation.MAX_PARAM_VAL_LENGTH} characters so we dropped this attribute.\"\\n )\\n- del values[name]\\n+ values.pop(name)',\n", - " 'from_author': False},\n", - " {'body': 'This is not equivalent, so this change should be reverted (f-strings use the repr, `str` does not).',\n", - " 'diff_hunk': '@@ -593,7 +593,7 @@ def filter_trackers(\\n if getattr(tracker_init, \"requires_logging_directory\"):\\n if logging_dir is None:\\n raise ValueError(\\n- f\"Logging with `{str(log_type)}` requires a `logging_dir` to be passed in.\"\\n+ f\"Logging with `{log_type}` requires a `logging_dir` to be passed in.\"',\n", - " 'from_author': False},\n", - " {'body': 'Leaving the else and return is clearer.',\n", - " 'diff_hunk': '@@ -538,8 +538,7 @@ def get_batch_transformer(data_iterator):\\n \\n if megatron_dataset_flag:\\n return get_batch_megatron\\n- else:\\n- return get_batch_transformer\\n+ return get_batch_transformer',\n", - " 'from_author': False},\n", - " {'body': 'Same here.',\n", - " 'diff_hunk': '@@ -577,8 +576,7 @@ def loss_func_finetune(labels, logits):\\n \\n if pretraining_flag:\\n return loss_func_pretrain\\n- else:\\n- return loss_func_finetune\\n+ return loss_func_finetune',\n", - " 'from_author': False},\n", - " {'body': 'Same here.',\n", - " 'diff_hunk': '@@ -666,8 +664,7 @@ def get_batch_transformer(data_iterator):\\n \\n if megatron_dataset_flag:\\n return get_batch_megatron\\n- else:\\n- return get_batch_transformer\\n+ return get_batch_transformer',\n", - " 'from_author': False},\n", - " {'body': 'Same here.',\n", - " 'diff_hunk': '@@ -803,8 +800,7 @@ def get_batch_transformer(data_iterator):\\n \\n if megatron_dataset_flag:\\n return get_batch_megatron\\n- else:\\n- return get_batch_transformer\\n+ return get_batch_transformer',\n", - " 'from_author': False},\n", - " {'body': 'Same here.',\n", - " 'diff_hunk': '@@ -1118,8 +1114,7 @@ def eval_step(self, **batch_data):\\n else:\\n loss_reduced[key] = torch.concat(losses_reduced_for_key)\\n return loss_reduced\\n- else:\\n- return {}\\n+ return {}',\n", - " 'from_author': False},\n", - " {'body': 'As above, this should be reverted.',\n", - " 'diff_hunk': '@@ -304,7 +304,7 @@ def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], modul\\n values = [v for k, v in device_map.items() if k.startswith(prefix)]\\n if len(set(values)) == 1 and len(values) > 1:\\n for k in [k for k in device_map if k.startswith(prefix)]:\\n- del device_map[k]\\n+ device_map.pop(k)',\n", - " 'from_author': False},\n", - " {'body': \"Let's leave the else return.\",\n", - " 'diff_hunk': '@@ -225,8 +225,7 @@ def gather(tensor):\\n return _gpu_gather(tensor)\\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\\n return _cpu_gather(tensor)\\n- else:\\n- return tensor\\n+ return tensor',\n", - " 'from_author': False},\n", - " {'body': 'Same here.',\n", - " 'diff_hunk': '@@ -297,8 +296,7 @@ def broadcast(tensor, from_process: int = 0):\\n return _gpu_broadcast(tensor, src=from_process)\\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\\n return _gpu_broadcast(tensor, src=from_process)\\n- else:\\n- return tensor\\n+ return tensor',\n", - " 'from_author': False},\n", - " {'body': 'Same here.',\n", - " 'diff_hunk': '@@ -439,8 +437,7 @@ def _reduce_across_processes(tensor, reduction=\"mean\"):\\n else:\\n if reduction == \"sum\":\\n return cloned_tensor.sum()\\n- else:\\n- return cloned_tensor.mean()\\n+ return 
cloned_tensor.mean()',\n", - " 'from_author': False},\n", - " {'body': \"I'd leave the `.keys()` as it's more readable this way.\",\n", - " 'diff_hunk': '@@ -40,7 +40,7 @@ def compare_versions(library_or_version: Union[str, Version], operation: str, re\\n requirement_version (`str`):\\n The version to compare the library version against\\n \"\"\"\\n- if operation not in STR_OPERATION_TO_FUNC.keys():\\n+ if operation not in STR_OPERATION_TO_FUNC:',\n", - " 'from_author': False},\n", - " {'body': 'For the record I quickly tested it and it seems that it actually uses `__str__`, not `__rep__`. \\r\\n\\r\\n```py\\r\\nclass A:\\r\\n def __str__(self):\\r\\n return \"Use str\"\\r\\n\\r\\n def __repr__(self):\\r\\n return \"Use repr\"\\r\\n\\r\\na = A()\\r\\nprint(a)\\r\\nprint(\"%s\" % a)\\r\\nprint(f\"{a}\")\\r\\nprint(f\"{str(a)}\")\\r\\nprint(\"{}\".format(a))\\r\\n```\\r\\n\\r\\nOutput (both in python 3.7 and 3.10)\\r\\n```\\r\\nUse str\\r\\nUse str\\r\\nUse str\\r\\nUse str\\r\\nUse str\\r\\n```',\n", - " 'diff_hunk': '@@ -593,7 +593,7 @@ def filter_trackers(\\n if getattr(tracker_init, \"requires_logging_directory\"):\\n if logging_dir is None:\\n raise ValueError(\\n- f\"Logging with `{str(log_type)}` requires a `logging_dir` to be passed in.\"\\n+ f\"Logging with `{log_type}` requires a `logging_dir` to be passed in.\"',\n", - " 'from_author': False},\n", - " {'body': 'Oh! My bad then.',\n", - " 'diff_hunk': '@@ -593,7 +593,7 @@ def filter_trackers(\\n if getattr(tracker_init, \"requires_logging_directory\"):\\n if logging_dir is None:\\n raise ValueError(\\n- f\"Logging with `{str(log_type)}` requires a `logging_dir` to be passed in.\"\\n+ f\"Logging with `{log_type}` requires a `logging_dir` to be passed in.\"',\n", - " 'from_author': False},\n", - " {'body': \"It is actually a TIL for me, didn't know what to expect :)\",\n", - " 'diff_hunk': '@@ -593,7 +593,7 @@ def filter_trackers(\\n if getattr(tracker_init, \"requires_logging_directory\"):\\n if logging_dir is None:\\n raise ValueError(\\n- f\"Logging with `{str(log_type)}` requires a `logging_dir` to be passed in.\"\\n+ f\"Logging with `{log_type}` requires a `logging_dir` to be passed in.\"',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/865'},\n", - " 1125476819: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex 6ac8d8d3a..d6a323559 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -953,7 +953,9 @@ def _prepare_deepspeed(self, *args):\\n batch_sizes = [batch_size // self.num_processes for batch_size in batch_sizes]\\n if len(batch_sizes) == 0:\\n raise ValueError(\\n- \"You must specify a training or evaluation dataloader in `accelerate.prepare()` when using DeepSpeed.\"\\n+ \"When using DeepSpeed `accelerate.prepare()` requires you to pass at least one of training or evaluation dataloaders \"\\n+ \"or alternatively set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file\"\\n+ \"or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config[\\'train_micro_batch_size_per_gpu\\']`.\"\\n )\\n \\n batch_size_per_device = min(batch_sizes) if deepspeed_plugin.is_train_batch_min else max(batch_sizes)\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the 
PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/861'},\n", - " 1125035113: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex 7a9ac92f4..6ac8d8d3a 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -1464,7 +1464,7 @@ def pad_across_processes(self, tensor, dim=0, pad_index=0, pad_first=False):\\n \"\"\"\\n return pad_across_processes(tensor, dim=dim, pad_index=pad_index, pad_first=pad_first)\\n \\n- def unwrap_model(self, model):\\n+ def unwrap_model(self, model, keep_fp32_wrapper: bool = False):\\n \"\"\"\\n Unwraps the `model` from the additional layer possible added by [`~Accelerator.prepare`]. Useful before saving\\n the model.\\n@@ -1472,8 +1472,10 @@ def unwrap_model(self, model):\\n Args:\\n model (`torch.nn.Module`):\\n The model to unwrap.\\n+ keep_fp32_wrapper (`bool`, *optional*, defaults to `False`):\\n+ Whether to not remove the mixed precision hook if it was added.\\n \"\"\"\\n- return extract_model_from_parallel(model)\\n+ return extract_model_from_parallel(model, keep_fp32_wrapper)\\n \\n def wait_for_everyone(self):\\n \"\"\"\\n@@ -1760,7 +1762,7 @@ def get_state_dict(self, model, unwrap=True):\\n Args:\\n model (`torch.nn.Module`):\\n A PyTorch model sent through [`Accelerator.prepare`]\\n- unwrap (`bool`, *optional*, defaults to True):\\n+ unwrap (`bool`, *optional*, defaults to `True`):\\n Whether to return the original underlying state_dict of `model` or to return the wrapped state_dict\\n \"\"\"\\n is_zero_3 = False\\ndiff --git a/src/accelerate/utils/other.py b/src/accelerate/utils/other.py\\nindex a9b6f9d4b..a4a7b6312 100644\\n--- a/src/accelerate/utils/other.py\\n+++ b/src/accelerate/utils/other.py\\n@@ -21,6 +21,7 @@\\n from ..state import AcceleratorState\\n from .dataclasses import DistributedType\\n from .imports import is_deepspeed_available, is_tpu_available\\n+from .operations import ConvertOutputsToFp32\\n \\n \\n if is_deepspeed_available():\\n@@ -30,12 +31,15 @@\\n import torch_xla.core.xla_model as xm\\n \\n \\n-def extract_model_from_parallel(model):\\n+def extract_model_from_parallel(model, keep_fp32_wrapper: bool = False):\\n \"\"\"\\n Extract a model from its distributed containers.\\n \\n Args:\\n- model (`torch.nn.Module`): The model to extract.\\n+ model (`torch.nn.Module`):\\n+ The model to extract.\\n+ keep_fp32_wrapper (`bool`, *optional*):\\n+ Whether to remove mixed precision hooks from the model.\\n \\n Returns:\\n `torch.nn.Module`: The extracted model.\\n@@ -46,6 +50,11 @@ def extract_model_from_parallel(model):\\n \\n while isinstance(model, options):\\n model = model.module\\n+\\n+ if not keep_fp32_wrapper:\\n+ forward = getattr(model, \"forward\")\\n+ if isinstance(forward, ConvertOutputsToFp32):\\n+ setattr(model, \"forward\", forward.model_forward)\\n return model\\n \\n \\n',\n", - " 'code_comments': [{'body': '```suggestion\\r\\n the model.\\r\\n```\\r\\nAdd this sentence when documenting the argument below instead.',\n", - " 'diff_hunk': '@@ -1464,16 +1464,16 @@ def pad_across_processes(self, tensor, dim=0, pad_index=0, pad_first=False):\\n \"\"\"\\n return pad_across_processes(tensor, dim=dim, pad_index=pad_index, pad_first=pad_first)\\n \\n- def unwrap_model(self, model):\\n+ def unwrap_model(self, model, keep_fp32_wrapper: bool = False):\\n \"\"\"\\n Unwraps the `model` from the additional layer possible added by [`~Accelerator.prepare`]. 
Useful before saving\\n- the model.\\n+ the model. Will also remove the mixed precision hook if it was added by default.',\n", - " 'from_author': False}],\n", - " 'context': [{'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_860). All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False},\n", - " {'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_860). All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False},\n", - " {'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_860). All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/860'},\n", - " 1124948804: {'diff': 'diff --git a/src/accelerate/logging.py b/src/accelerate/logging.py\\nindex 46118f7c6..2eceb223e 100644\\n--- a/src/accelerate/logging.py\\n+++ b/src/accelerate/logging.py\\n@@ -78,7 +78,7 @@ def get_logger(name: str, log_level: str = None):\\n ```\\n \"\"\"\\n if log_level is None:\\n- log_level = os.environ.get(\"ACCELERATE_LOG_LEVEL\", \"INFO\")\\n+ log_level = os.environ.get(\"ACCELERATE_LOG_LEVEL\", \"WARNING\")\\n logger = logging.getLogger(name)\\n logging.basicConfig(level=log_level.upper())\\n return MultiProcessAdapter(logger, {})\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_859). All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/859'},\n", - " 1124252364: {'diff': 'diff --git a/.github/workflows/build_pr_documentation.yml b/.github/workflows/build_pr_documentation.yml\\nindex 12c600cb7..dc56751c6 100644\\n--- a/.github/workflows/build_pr_documentation.yml\\n+++ b/.github/workflows/build_pr_documentation.yml\\n@@ -9,11 +9,8 @@ concurrency:\\n \\n jobs:\\n build:\\n- uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@use_hf_hub\\n+ uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main\\n with:\\n commit_sha: ${{ github.event.pull_request.head.sha }}\\n pr_number: ${{ github.event.number }}\\n package: accelerate\\n- secrets:\\n- token: ${{ secrets.HF_DOC_PUSH }}\\n- comment_bot_token: ${{ secrets.HUGGINGFACE_PUSH }}\\ndiff --git a/.github/workflows/delete_doc_comment.yml b/.github/workflows/delete_doc_comment.yml\\nindex 973c2a8b3..da61d21df 100644\\n--- a/.github/workflows/delete_doc_comment.yml\\n+++ b/.github/workflows/delete_doc_comment.yml\\n@@ -7,10 +7,7 @@ on:\\n \\n jobs:\\n delete:\\n- uses: huggingface/doc-builder/.github/workflows/delete_doc_comment.yml@use_hf_hub\\n+ uses: huggingface/doc-builder/.github/workflows/delete_doc_comment.yml@main\\n with:\\n pr_number: ${{ github.event.number }}\\n package: accelerate\\n- secrets:\\n- token: ${{ secrets.HF_DOC_PUSH }}\\n- comment_bot_token: ${{ secrets.HUGGINGFACE_PUSH }}\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_857). 
All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/857'},\n", - " 1123457165: {'diff': 'diff --git a/docs/source/package_reference/cli.mdx b/docs/source/package_reference/cli.mdx\\nindex eafec816e..3d72aada6 100644\\n--- a/docs/source/package_reference/cli.mdx\\n+++ b/docs/source/package_reference/cli.mdx\\n@@ -46,7 +46,7 @@ Create a default config file for Accelerate with only a few flags set.\\n **Usage**: \\n \\n ```bash\\n-accelerate default-config [arguments]\\n+accelerate config default [arguments]\\n ```\\n \\n **Optional Arguments**:\\n@@ -57,6 +57,28 @@ accelerate default-config [arguments]\\n * `-h`, `--help` (`bool`) -- Show a help message and exit\\n * `--mixed_precision {no,fp16,bf16}` (`str`) -- Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\\n \\n+## accelerate config update\\n+\\n+**Command**:\\n+\\n+`accelerate config update` or `accelerate-config update`\\n+\\n+Update an existing config file with the latest defaults while maintaining the old configuration.\\n+\\n+**Usage**: \\n+\\n+```bash\\n+accelerate config update [arguments]\\n+```\\n+\\n+**Optional Arguments**:\\n+* `--config_file CONFIG_FILE` (`str`) -- The path to the config file to update. Will default to a file named default_config.yaml in the cache location, which is the content\\n+ of the environment `HF_HOME` suffixed with \\'accelerate\\', or if you don\\'t have such an environment variable, your cache directory\\n+ (`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.\\n+\\n+* `-h`, `--help` (`bool`) -- Show a help message and exit\\n+\\n+\\n ## accelerate env\\n \\n **Command**:\\ndiff --git a/src/accelerate/commands/config/__init__.py b/src/accelerate/commands/config/__init__.py\\nindex ba9d6cf30..9b1545982 100644\\n--- a/src/accelerate/commands/config/__init__.py\\n+++ b/src/accelerate/commands/config/__init__.py\\n@@ -16,27 +16,22 @@\\n \\n import argparse\\n \\n-from .config import config_command, config_command_parser\\n+from .config import config_command_parser\\n from .config_args import default_config_file, load_config_from_file # noqa: F401\\n-from .default import default_command_parser, default_config_command\\n-\\n-\\n-def filter_command_args(args: dict, args_prefix: str):\\n- \"Filters args while only keeping ones that are prefixed with `{args_prefix}.`\"\\n- new_args = argparse.Namespace()\\n- for key, value in vars(args).items():\\n- if key.startswith(args_prefix):\\n- setattr(new_args, key.replace(f\"{args_prefix}.\", \"\"), value)\\n- return new_args\\n+from .default import default_command_parser\\n+from .update import update_command_parser\\n \\n \\n def get_config_parser(subparsers=None):\\n parent_parser = argparse.ArgumentParser(add_help=False)\\n # The main config parser\\n config_parser = config_command_parser(subparsers)\\n+ # The subparser to add commands to\\n+ subcommands = config_parser.add_subparsers(title=\"subcommands\", dest=\"subcommand\")\\n \\n # Then add other parsers with the parent parser\\n- default_parser = default_command_parser(config_parser, parents=[parent_parser]) # noqa: F841\\n+ default_command_parser(subcommands, parents=[parent_parser])\\n+ update_command_parser(subcommands, parents=[parent_parser])\\n \\n return config_parser\\n \\n@@ -44,12 +39,13 @@ def 
get_config_parser(subparsers=None):\\n def main():\\n config_parser = get_config_parser()\\n args = config_parser.parse_args()\\n- if not args.default:\\n- args = filter_command_args(args, \"config_args\")\\n- config_command(args)\\n- elif args.default:\\n- args = filter_command_args(args, \"default_args\")\\n- default_config_command(args)\\n+\\n+ if not hasattr(args, \"func\"):\\n+ config_parser.print_help()\\n+ exit(1)\\n+\\n+ # Run\\n+ args.func(args)\\n \\n \\n if __name__ == \"__main__\":\\ndiff --git a/src/accelerate/commands/config/config.py b/src/accelerate/commands/config/config.py\\nindex b504f07ad..72414f2ab 100644\\n--- a/src/accelerate/commands/config/config.py\\n+++ b/src/accelerate/commands/config/config.py\\n@@ -21,13 +21,7 @@\\n \\n from .cluster import get_cluster_input\\n from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401\\n-from .config_utils import ( # noqa: F401\\n- GroupedAction,\\n- SubcommandHelpFormatter,\\n- _ask_field,\\n- _ask_options,\\n- _convert_compute_environment,\\n-)\\n+from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401\\n from .sagemaker import get_sagemaker_input\\n \\n \\n@@ -49,18 +43,13 @@ def get_user_input():\\n \\n def config_command_parser(subparsers=None):\\n if subparsers is not None:\\n- parser = subparsers.add_parser(\"config\", description=description, formatter_class=SubcommandHelpFormatter)\\n+ parser = subparsers.add_parser(\"config\", description=description)\\n else:\\n- parser = argparse.ArgumentParser(\\n- \"Accelerate config command\", description=description, formatter_class=SubcommandHelpFormatter\\n- )\\n+ parser = argparse.ArgumentParser(\"Accelerate config command\", description=description)\\n \\n parser.add_argument(\\n \"--config_file\",\\n default=None,\\n- dest=\"config_args.config_file\",\\n- metavar=\"CONFIG_FILE\",\\n- action=GroupedAction,\\n help=(\\n \"The path to use to store the config file. 
Will default to a file named default_config.yaml in the cache \"\\n \"location, which is the content of the environment `HF_HOME` suffixed with \\'accelerate\\', or if you don\\'t have \"\\n@@ -87,6 +76,7 @@ def config_command(args):\\n config.to_json_file(config_file)\\n else:\\n config.to_yaml_file(config_file)\\n+ print(f\"accelerate configuration saved at {config_file}\")\\n \\n \\n def main():\\ndiff --git a/src/accelerate/commands/config/config_utils.py b/src/accelerate/commands/config/config_utils.py\\nindex 1deee9f32..736c3a3d5 100644\\n--- a/src/accelerate/commands/config/config_utils.py\\n+++ b/src/accelerate/commands/config/config_utils.py\\n@@ -88,29 +88,11 @@ def _convert_yes_no_to_bool(value):\\n return {\"yes\": True, \"no\": False}[value.lower()]\\n \\n \\n-class GroupedAction(argparse.Action):\\n- \"\"\"\\n- Filters arguments into seperate namespace groups based on the first part of the argument name.\\n- \"\"\"\\n-\\n- def __call__(self, parser, namespace, values, option_string=None):\\n- group, dest = self.dest.split(\".\", 2)\\n- groupspace = getattr(namespace, group, argparse.Namespace())\\n- setattr(groupspace, dest, values)\\n- setattr(namespace, group, groupspace)\\n-\\n-\\n class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):\\n \"\"\"\\n A custom formatter that will remove the usage line from the help message for subcommands.\\n \"\"\"\\n \\n- def _format_action(self, action):\\n- parts = super()._format_action(action)\\n- if action.nargs == argparse.PARSER:\\n- parts = \"\\\\n\".join(parts.split(\"\\\\n\")[1:])\\n- return parts\\n-\\n def _format_usage(self, usage, actions, groups, prefix):\\n usage = super()._format_usage(usage, actions, groups, prefix)\\n usage = usage.replace(\" [] \", \"\")\\ndiff --git a/src/accelerate/commands/config/default.py b/src/accelerate/commands/config/default.py\\nindex b87a1d45b..f7ceb84ce 100644\\n--- a/src/accelerate/commands/config/default.py\\n+++ b/src/accelerate/commands/config/default.py\\n@@ -14,13 +14,15 @@\\n # See the License for the specific language governing permissions and\\n # limitations under the License.\\n \\n-import argparse\\n from pathlib import Path\\n \\n import torch\\n \\n from .config_args import ClusterConfig, default_json_config_file\\n-from .config_utils import GroupedAction\\n+from .config_utils import SubcommandHelpFormatter\\n+\\n+\\n+description = \"Create a default config file for Accelerate with only a few flags set.\"\\n \\n \\n def write_basic_config(mixed_precision=\"no\", save_location: str = default_json_config_file, dynamo_backend=\"no\"):\\n@@ -42,7 +44,7 @@ def write_basic_config(mixed_precision=\"no\", save_location: str = default_json_c\\n print(\\n f\"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.\"\\n )\\n- return\\n+ return False\\n mixed_precision = mixed_precision.lower()\\n if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\\n raise ValueError(f\"`mixed_precision` should be one of \\'no\\', \\'fp16\\', or \\'bf16\\'. 
Received {mixed_precision}\")\\n@@ -64,20 +66,13 @@ def write_basic_config(mixed_precision=\"no\", save_location: str = default_json_c\\n config[\"use_cpu\"] = True\\n config[\"num_processes\"] = 1\\n config[\"distributed_type\"] = \"NO\"\\n- if not path.exists():\\n- config = ClusterConfig(**config)\\n- config.to_json_file(path)\\n+ config = ClusterConfig(**config)\\n+ config.to_json_file(path)\\n+ return path\\n \\n \\n-description = \"Create a default config file for Accelerate with only a few flags set.\"\\n-\\n-\\n-def default_command_parser(parser=None, parents=None):\\n- if parser is None and parents is None:\\n- parser = argparse.ArgumentParser(description=description)\\n- else:\\n- default_parser = parser.add_subparsers(title=\"subcommand {default}\", dest=\"default\", description=description)\\n- parser = default_parser.add_parser(\"default\", parents=parents)\\n+def default_command_parser(parser, parents):\\n+ parser = parser.add_parser(\"default\", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)\\n parser.add_argument(\\n \"--config_file\",\\n default=default_json_config_file,\\n@@ -87,9 +82,7 @@ def default_command_parser(parser=None, parents=None):\\n \"such an environment variable, your cache directory (\\'~/.cache\\' or the content of `XDG_CACHE_HOME`) suffixed \"\\n \"with \\'huggingface\\'.\"\\n ),\\n- dest=\"default_args.save_location\",\\n- metavar=\"CONFIG_FILE\",\\n- action=GroupedAction,\\n+ dest=\"save_location\",\\n )\\n \\n parser.add_argument(\\n@@ -100,14 +93,12 @@ def default_command_parser(parser=None, parents=None):\\n \"Choose between FP16 and BF16 (bfloat16) training. \"\\n \"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\",\\n default=\"no\",\\n- dest=\"default_args.mixed_precision\",\\n- action=GroupedAction,\\n )\\n parser.set_defaults(func=default_config_command)\\n return parser\\n \\n \\n def default_config_command(args):\\n- args = vars(args)\\n- args.pop(\"func\", None)\\n- write_basic_config(**args)\\n+ config_file = write_basic_config(args.mixed_precision, args.save_location)\\n+ if config_file:\\n+ print(f\"accelerate configuration saved at {config_file}\")\\ndiff --git a/src/accelerate/commands/config/update.py b/src/accelerate/commands/config/update.py\\nnew file mode 100644\\nindex 000000000..5f025594b\\n--- /dev/null\\n+++ b/src/accelerate/commands/config/update.py\\n@@ -0,0 +1,63 @@\\n+#!/usr/bin/env python\\n+\\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+from pathlib import Path\\n+\\n+from .config_args import default_config_file, load_config_from_file\\n+from .config_utils import SubcommandHelpFormatter\\n+\\n+\\n+description = \"Update an existing config file with the latest defaults while maintaining the old configuration.\"\\n+\\n+\\n+def update_config(args):\\n+ \"\"\"\\n+ Update an existing config file with the latest defaults while maintaining the old configuration.\\n+ \"\"\"\\n+ config_file = args.config_file\\n+ if config_file is None and Path(default_config_file).exists():\\n+ config_file = default_config_file\\n+ elif not Path(config_file).exists():\\n+ raise ValueError(f\"The passed config file located at {config_file} doesn\\'t exist.\")\\n+ config = load_config_from_file(config_file)\\n+\\n+ if config_file.endswith(\".json\"):\\n+ config.to_json_file(config_file)\\n+ else:\\n+ config.to_yaml_file(config_file)\\n+ return config_file\\n+\\n+\\n+def update_command_parser(parser, parents):\\n+ parser = parser.add_parser(\"update\", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)\\n+ parser.add_argument(\\n+ \"--config_file\",\\n+ default=None,\\n+ help=(\\n+ \"The path to the config file to update. Will default to a file named default_config.yaml in the cache \"\\n+ \"location, which is the content of the environment `HF_HOME` suffixed with \\'accelerate\\', or if you don\\'t have \"\\n+ \"such an environment variable, your cache directory (\\'~/.cache\\' or the content of `XDG_CACHE_HOME`) suffixed \"\\n+ \"with \\'huggingface\\'.\"\\n+ ),\\n+ )\\n+\\n+ parser.set_defaults(func=update_config_command)\\n+ return parser\\n+\\n+\\n+def update_config_command(args):\\n+ config_file = update_config(args)\\n+ print(f\"Sucessfully updated the configuration file at {config_file}.\")\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_855). All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/855'},\n", - " 1123180567: {'diff': 'diff --git a/docs/source/package_reference/cli.mdx b/docs/source/package_reference/cli.mdx\\nindex eafec816e..3d72aada6 100644\\n--- a/docs/source/package_reference/cli.mdx\\n+++ b/docs/source/package_reference/cli.mdx\\n@@ -46,7 +46,7 @@ Create a default config file for Accelerate with only a few flags set.\\n **Usage**: \\n \\n ```bash\\n-accelerate default-config [arguments]\\n+accelerate config default [arguments]\\n ```\\n \\n **Optional Arguments**:\\n@@ -57,6 +57,28 @@ accelerate default-config [arguments]\\n * `-h`, `--help` (`bool`) -- Show a help message and exit\\n * `--mixed_precision {no,fp16,bf16}` (`str`) -- Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. 
BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\\n \\n+## accelerate config update\\n+\\n+**Command**:\\n+\\n+`accelerate config update` or `accelerate-config update`\\n+\\n+Update an existing config file with the latest defaults while maintaining the old configuration.\\n+\\n+**Usage**: \\n+\\n+```bash\\n+accelerate config update [arguments]\\n+```\\n+\\n+**Optional Arguments**:\\n+* `--config_file CONFIG_FILE` (`str`) -- The path to the config file to update. Will default to a file named default_config.yaml in the cache location, which is the content\\n+ of the environment `HF_HOME` suffixed with \\'accelerate\\', or if you don\\'t have such an environment variable, your cache directory\\n+ (`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.\\n+\\n+* `-h`, `--help` (`bool`) -- Show a help message and exit\\n+\\n+\\n ## accelerate env\\n \\n **Command**:\\ndiff --git a/src/accelerate/commands/config/__init__.py b/src/accelerate/commands/config/__init__.py\\nindex ba9d6cf30..9b1545982 100644\\n--- a/src/accelerate/commands/config/__init__.py\\n+++ b/src/accelerate/commands/config/__init__.py\\n@@ -16,27 +16,22 @@\\n \\n import argparse\\n \\n-from .config import config_command, config_command_parser\\n+from .config import config_command_parser\\n from .config_args import default_config_file, load_config_from_file # noqa: F401\\n-from .default import default_command_parser, default_config_command\\n-\\n-\\n-def filter_command_args(args: dict, args_prefix: str):\\n- \"Filters args while only keeping ones that are prefixed with `{args_prefix}.`\"\\n- new_args = argparse.Namespace()\\n- for key, value in vars(args).items():\\n- if key.startswith(args_prefix):\\n- setattr(new_args, key.replace(f\"{args_prefix}.\", \"\"), value)\\n- return new_args\\n+from .default import default_command_parser\\n+from .update import update_command_parser\\n \\n \\n def get_config_parser(subparsers=None):\\n parent_parser = argparse.ArgumentParser(add_help=False)\\n # The main config parser\\n config_parser = config_command_parser(subparsers)\\n+ # The subparser to add commands to\\n+ subcommands = config_parser.add_subparsers(title=\"subcommands\", dest=\"subcommand\")\\n \\n # Then add other parsers with the parent parser\\n- default_parser = default_command_parser(config_parser, parents=[parent_parser]) # noqa: F841\\n+ default_command_parser(subcommands, parents=[parent_parser])\\n+ update_command_parser(subcommands, parents=[parent_parser])\\n \\n return config_parser\\n \\n@@ -44,12 +39,13 @@ def get_config_parser(subparsers=None):\\n def main():\\n config_parser = get_config_parser()\\n args = config_parser.parse_args()\\n- if not args.default:\\n- args = filter_command_args(args, \"config_args\")\\n- config_command(args)\\n- elif args.default:\\n- args = filter_command_args(args, \"default_args\")\\n- default_config_command(args)\\n+\\n+ if not hasattr(args, \"func\"):\\n+ config_parser.print_help()\\n+ exit(1)\\n+\\n+ # Run\\n+ args.func(args)\\n \\n \\n if __name__ == \"__main__\":\\ndiff --git a/src/accelerate/commands/config/config.py b/src/accelerate/commands/config/config.py\\nindex b504f07ad..72414f2ab 100644\\n--- a/src/accelerate/commands/config/config.py\\n+++ b/src/accelerate/commands/config/config.py\\n@@ -21,13 +21,7 @@\\n \\n from .cluster import get_cluster_input\\n from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401\\n-from .config_utils import ( # noqa: F401\\n- 
GroupedAction,\\n- SubcommandHelpFormatter,\\n- _ask_field,\\n- _ask_options,\\n- _convert_compute_environment,\\n-)\\n+from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401\\n from .sagemaker import get_sagemaker_input\\n \\n \\n@@ -49,18 +43,13 @@ def get_user_input():\\n \\n def config_command_parser(subparsers=None):\\n if subparsers is not None:\\n- parser = subparsers.add_parser(\"config\", description=description, formatter_class=SubcommandHelpFormatter)\\n+ parser = subparsers.add_parser(\"config\", description=description)\\n else:\\n- parser = argparse.ArgumentParser(\\n- \"Accelerate config command\", description=description, formatter_class=SubcommandHelpFormatter\\n- )\\n+ parser = argparse.ArgumentParser(\"Accelerate config command\", description=description)\\n \\n parser.add_argument(\\n \"--config_file\",\\n default=None,\\n- dest=\"config_args.config_file\",\\n- metavar=\"CONFIG_FILE\",\\n- action=GroupedAction,\\n help=(\\n \"The path to use to store the config file. Will default to a file named default_config.yaml in the cache \"\\n \"location, which is the content of the environment `HF_HOME` suffixed with \\'accelerate\\', or if you don\\'t have \"\\n@@ -87,6 +76,7 @@ def config_command(args):\\n config.to_json_file(config_file)\\n else:\\n config.to_yaml_file(config_file)\\n+ print(f\"accelerate configuration saved at {config_file}\")\\n \\n \\n def main():\\ndiff --git a/src/accelerate/commands/config/config_utils.py b/src/accelerate/commands/config/config_utils.py\\nindex 1deee9f32..736c3a3d5 100644\\n--- a/src/accelerate/commands/config/config_utils.py\\n+++ b/src/accelerate/commands/config/config_utils.py\\n@@ -88,29 +88,11 @@ def _convert_yes_no_to_bool(value):\\n return {\"yes\": True, \"no\": False}[value.lower()]\\n \\n \\n-class GroupedAction(argparse.Action):\\n- \"\"\"\\n- Filters arguments into seperate namespace groups based on the first part of the argument name.\\n- \"\"\"\\n-\\n- def __call__(self, parser, namespace, values, option_string=None):\\n- group, dest = self.dest.split(\".\", 2)\\n- groupspace = getattr(namespace, group, argparse.Namespace())\\n- setattr(groupspace, dest, values)\\n- setattr(namespace, group, groupspace)\\n-\\n-\\n class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):\\n \"\"\"\\n A custom formatter that will remove the usage line from the help message for subcommands.\\n \"\"\"\\n \\n- def _format_action(self, action):\\n- parts = super()._format_action(action)\\n- if action.nargs == argparse.PARSER:\\n- parts = \"\\\\n\".join(parts.split(\"\\\\n\")[1:])\\n- return parts\\n-\\n def _format_usage(self, usage, actions, groups, prefix):\\n usage = super()._format_usage(usage, actions, groups, prefix)\\n usage = usage.replace(\" [] \", \"\")\\ndiff --git a/src/accelerate/commands/config/default.py b/src/accelerate/commands/config/default.py\\nindex b87a1d45b..f7ceb84ce 100644\\n--- a/src/accelerate/commands/config/default.py\\n+++ b/src/accelerate/commands/config/default.py\\n@@ -14,13 +14,15 @@\\n # See the License for the specific language governing permissions and\\n # limitations under the License.\\n \\n-import argparse\\n from pathlib import Path\\n \\n import torch\\n \\n from .config_args import ClusterConfig, default_json_config_file\\n-from .config_utils import GroupedAction\\n+from .config_utils import SubcommandHelpFormatter\\n+\\n+\\n+description = \"Create a default config file for Accelerate with only a few flags set.\"\\n \\n \\n def 
write_basic_config(mixed_precision=\"no\", save_location: str = default_json_config_file, dynamo_backend=\"no\"):\\n@@ -42,7 +44,7 @@ def write_basic_config(mixed_precision=\"no\", save_location: str = default_json_c\\n print(\\n f\"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.\"\\n )\\n- return\\n+ return False\\n mixed_precision = mixed_precision.lower()\\n if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\\n raise ValueError(f\"`mixed_precision` should be one of \\'no\\', \\'fp16\\', or \\'bf16\\'. Received {mixed_precision}\")\\n@@ -64,20 +66,13 @@ def write_basic_config(mixed_precision=\"no\", save_location: str = default_json_c\\n config[\"use_cpu\"] = True\\n config[\"num_processes\"] = 1\\n config[\"distributed_type\"] = \"NO\"\\n- if not path.exists():\\n- config = ClusterConfig(**config)\\n- config.to_json_file(path)\\n+ config = ClusterConfig(**config)\\n+ config.to_json_file(path)\\n+ return path\\n \\n \\n-description = \"Create a default config file for Accelerate with only a few flags set.\"\\n-\\n-\\n-def default_command_parser(parser=None, parents=None):\\n- if parser is None and parents is None:\\n- parser = argparse.ArgumentParser(description=description)\\n- else:\\n- default_parser = parser.add_subparsers(title=\"subcommand {default}\", dest=\"default\", description=description)\\n- parser = default_parser.add_parser(\"default\", parents=parents)\\n+def default_command_parser(parser, parents):\\n+ parser = parser.add_parser(\"default\", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)\\n parser.add_argument(\\n \"--config_file\",\\n default=default_json_config_file,\\n@@ -87,9 +82,7 @@ def default_command_parser(parser=None, parents=None):\\n \"such an environment variable, your cache directory (\\'~/.cache\\' or the content of `XDG_CACHE_HOME`) suffixed \"\\n \"with \\'huggingface\\'.\"\\n ),\\n- dest=\"default_args.save_location\",\\n- metavar=\"CONFIG_FILE\",\\n- action=GroupedAction,\\n+ dest=\"save_location\",\\n )\\n \\n parser.add_argument(\\n@@ -100,14 +93,12 @@ def default_command_parser(parser=None, parents=None):\\n \"Choose between FP16 and BF16 (bfloat16) training. \"\\n \"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\",\\n default=\"no\",\\n- dest=\"default_args.mixed_precision\",\\n- action=GroupedAction,\\n )\\n parser.set_defaults(func=default_config_command)\\n return parser\\n \\n \\n def default_config_command(args):\\n- args = vars(args)\\n- args.pop(\"func\", None)\\n- write_basic_config(**args)\\n+ config_file = write_basic_config(args.mixed_precision, args.save_location)\\n+ if config_file:\\n+ print(f\"accelerate configuration saved at {config_file}\")\\ndiff --git a/src/accelerate/commands/config/update.py b/src/accelerate/commands/config/update.py\\nnew file mode 100644\\nindex 000000000..5f025594b\\n--- /dev/null\\n+++ b/src/accelerate/commands/config/update.py\\n@@ -0,0 +1,63 @@\\n+#!/usr/bin/env python\\n+\\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+from pathlib import Path\\n+\\n+from .config_args import default_config_file, load_config_from_file\\n+from .config_utils import SubcommandHelpFormatter\\n+\\n+\\n+description = \"Update an existing config file with the latest defaults while maintaining the old configuration.\"\\n+\\n+\\n+def update_config(args):\\n+ \"\"\"\\n+ Update an existing config file with the latest defaults while maintaining the old configuration.\\n+ \"\"\"\\n+ config_file = args.config_file\\n+ if config_file is None and Path(default_config_file).exists():\\n+ config_file = default_config_file\\n+ elif not Path(config_file).exists():\\n+ raise ValueError(f\"The passed config file located at {config_file} doesn\\'t exist.\")\\n+ config = load_config_from_file(config_file)\\n+\\n+ if config_file.endswith(\".json\"):\\n+ config.to_json_file(config_file)\\n+ else:\\n+ config.to_yaml_file(config_file)\\n+ return config_file\\n+\\n+\\n+def update_command_parser(parser, parents):\\n+ parser = parser.add_parser(\"update\", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)\\n+ parser.add_argument(\\n+ \"--config_file\",\\n+ default=None,\\n+ help=(\\n+ \"The path to the config file to update. 
Will default to a file named default_config.yaml in the cache \"\\n+ \"location, which is the content of the environment `HF_HOME` suffixed with \\'accelerate\\', or if you don\\'t have \"\\n+ \"such an environment variable, your cache directory (\\'~/.cache\\' or the content of `XDG_CACHE_HOME`) suffixed \"\\n+ \"with \\'huggingface\\'.\"\\n+ ),\\n+ )\\n+\\n+ parser.set_defaults(func=update_config_command)\\n+ return parser\\n+\\n+\\n+def update_config_command(args):\\n+ config_file = update_config(args)\\n+ print(f\"Sucessfully updated the configuration file at {config_file}.\")\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'The DeepSpeed CI failures are unrelated and look like a startup issue',\n", - " 'from_author': True}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/853'},\n", - " 1121918243: {'diff': 'diff --git a/docs/source/package_reference/cli.mdx b/docs/source/package_reference/cli.mdx\\nindex 562e1f4a4..eafec816e 100644\\n--- a/docs/source/package_reference/cli.mdx\\n+++ b/docs/source/package_reference/cli.mdx\\n@@ -35,11 +35,11 @@ accelerate config [arguments]\\n (`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.\\n * `-h`, `--help` (`bool`) -- Show a help message and exit\\n \\n-## accelerate default-config \\n+## accelerate config default\\n \\n **Command**:\\n \\n-`accelerate default-config` or `accelerate-default-config`\\n+`accelerate config default` or `accelerate-config default`\\n \\n Create a default config file for Accelerate with only a few flags set.\\n \\ndiff --git a/setup.py b/setup.py\\nindex 622d6de69..f96ebc440 100644\\n--- a/setup.py\\n+++ b/setup.py\\n@@ -48,7 +48,6 @@\\n \"accelerate=accelerate.commands.accelerate_cli:main\",\\n \"accelerate-config=accelerate.commands.config:main\",\\n \"accelerate-launch=accelerate.commands.launch:main\",\\n- \"accelerate-default-config=accelerate.commands.config.default:main\",\\n ]\\n },\\n python_requires=\">=3.7.0\",\\ndiff --git a/src/accelerate/commands/accelerate_cli.py b/src/accelerate/commands/accelerate_cli.py\\nindex 8ffda3572..7716526c5 100644\\n--- a/src/accelerate/commands/accelerate_cli.py\\n+++ b/src/accelerate/commands/accelerate_cli.py\\n@@ -16,8 +16,7 @@\\n \\n from argparse import ArgumentParser\\n \\n-from accelerate.commands.config import config_command_parser\\n-from accelerate.commands.config.default import default_command_parser\\n+from accelerate.commands.config import get_config_parser\\n from accelerate.commands.env import env_command_parser\\n from accelerate.commands.launch import launch_command_parser\\n from accelerate.commands.test import test_command_parser\\n@@ -29,8 +28,7 @@ def main():\\n subparsers = parser.add_subparsers(help=\"accelerate command helpers\")\\n \\n # Register commands\\n- config_command_parser(subparsers=subparsers)\\n- default_command_parser(subparsers=subparsers)\\n+ get_config_parser(subparsers=subparsers)\\n env_command_parser(subparsers=subparsers)\\n launch_command_parser(subparsers=subparsers)\\n tpu_command_parser(subparsers=subparsers)\\ndiff --git a/src/accelerate/commands/config/__init__.py b/src/accelerate/commands/config/__init__.py\\nindex 2400555c4..ba9d6cf30 100644\\n--- a/src/accelerate/commands/config/__init__.py\\n+++ b/src/accelerate/commands/config/__init__.py\\n@@ -15,73 +15,41 @@\\n # limitations under the License.\\n \\n import argparse\\n-import 
os\\n \\n-from accelerate.utils import ComputeEnvironment\\n+from .config import config_command, config_command_parser\\n+from .config_args import default_config_file, load_config_from_file # noqa: F401\\n+from .default import default_command_parser, default_config_command\\n \\n-from .cluster import get_cluster_input\\n-from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401\\n-from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401\\n-from .sagemaker import get_sagemaker_input\\n \\n+def filter_command_args(args: dict, args_prefix: str):\\n+ \"Filters args while only keeping ones that are prefixed with `{args_prefix}.`\"\\n+ new_args = argparse.Namespace()\\n+ for key, value in vars(args).items():\\n+ if key.startswith(args_prefix):\\n+ setattr(new_args, key.replace(f\"{args_prefix}.\", \"\"), value)\\n+ return new_args\\n \\n-description = \"Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine\"\\n \\n+def get_config_parser(subparsers=None):\\n+ parent_parser = argparse.ArgumentParser(add_help=False)\\n+ # The main config parser\\n+ config_parser = config_command_parser(subparsers)\\n \\n-def get_user_input():\\n- compute_environment = _ask_options(\\n- \"In which compute environment are you running?\",\\n- [\"This machine\", \"AWS (Amazon SageMaker)\"],\\n- _convert_compute_environment,\\n- )\\n- if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:\\n- config = get_sagemaker_input()\\n- else:\\n- config = get_cluster_input()\\n- return config\\n+ # Then add other parsers with the parent parser\\n+ default_parser = default_command_parser(config_parser, parents=[parent_parser]) # noqa: F841\\n \\n-\\n-def config_command_parser(subparsers=None):\\n- if subparsers is not None:\\n- parser = subparsers.add_parser(\"config\", description=description)\\n- else:\\n- parser = argparse.ArgumentParser(\"Accelerate config command\", description=description)\\n-\\n- parser.add_argument(\\n- \"--config_file\",\\n- default=None,\\n- help=(\\n- \"The path to use to store the config file. 
Will default to a file named default_config.yaml in the cache \"\\n- \"location, which is the content of the environment `HF_HOME` suffixed with \\'accelerate\\', or if you don\\'t have \"\\n- \"such an environment variable, your cache directory (\\'~/.cache\\' or the content of `XDG_CACHE_HOME`) suffixed \"\\n- \"with \\'huggingface\\'.\"\\n- ),\\n- )\\n-\\n- if subparsers is not None:\\n- parser.set_defaults(func=config_command)\\n- return parser\\n-\\n-\\n-def config_command(args):\\n- config = get_user_input()\\n- if args.config_file is not None:\\n- config_file = args.config_file\\n- else:\\n- if not os.path.isdir(cache_dir):\\n- os.makedirs(cache_dir)\\n- config_file = default_yaml_config_file\\n-\\n- if config_file.endswith(\".json\"):\\n- config.to_json_file(config_file)\\n- else:\\n- config.to_yaml_file(config_file)\\n+ return config_parser\\n \\n \\n def main():\\n- parser = config_command_parser()\\n- args = parser.parse_args()\\n- config_command(args)\\n+ config_parser = get_config_parser()\\n+ args = config_parser.parse_args()\\n+ if not args.default:\\n+ args = filter_command_args(args, \"config_args\")\\n+ config_command(args)\\n+ elif args.default:\\n+ args = filter_command_args(args, \"default_args\")\\n+ default_config_command(args)\\n \\n \\n if __name__ == \"__main__\":\\ndiff --git a/src/accelerate/commands/config/config.py b/src/accelerate/commands/config/config.py\\nnew file mode 100644\\nindex 000000000..b504f07ad\\n--- /dev/null\\n+++ b/src/accelerate/commands/config/config.py\\n@@ -0,0 +1,99 @@\\n+#!/usr/bin/env python\\n+\\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+import argparse\\n+import os\\n+\\n+from accelerate.utils import ComputeEnvironment\\n+\\n+from .cluster import get_cluster_input\\n+from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401\\n+from .config_utils import ( # noqa: F401\\n+ GroupedAction,\\n+ SubcommandHelpFormatter,\\n+ _ask_field,\\n+ _ask_options,\\n+ _convert_compute_environment,\\n+)\\n+from .sagemaker import get_sagemaker_input\\n+\\n+\\n+description = \"Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. 
Should always be ran first on your machine\"\\n+\\n+\\n+def get_user_input():\\n+ compute_environment = _ask_options(\\n+ \"In which compute environment are you running?\",\\n+ [\"This machine\", \"AWS (Amazon SageMaker)\"],\\n+ _convert_compute_environment,\\n+ )\\n+ if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:\\n+ config = get_sagemaker_input()\\n+ else:\\n+ config = get_cluster_input()\\n+ return config\\n+\\n+\\n+def config_command_parser(subparsers=None):\\n+ if subparsers is not None:\\n+ parser = subparsers.add_parser(\"config\", description=description, formatter_class=SubcommandHelpFormatter)\\n+ else:\\n+ parser = argparse.ArgumentParser(\\n+ \"Accelerate config command\", description=description, formatter_class=SubcommandHelpFormatter\\n+ )\\n+\\n+ parser.add_argument(\\n+ \"--config_file\",\\n+ default=None,\\n+ dest=\"config_args.config_file\",\\n+ metavar=\"CONFIG_FILE\",\\n+ action=GroupedAction,\\n+ help=(\\n+ \"The path to use to store the config file. Will default to a file named default_config.yaml in the cache \"\\n+ \"location, which is the content of the environment `HF_HOME` suffixed with \\'accelerate\\', or if you don\\'t have \"\\n+ \"such an environment variable, your cache directory (\\'~/.cache\\' or the content of `XDG_CACHE_HOME`) suffixed \"\\n+ \"with \\'huggingface\\'.\"\\n+ ),\\n+ )\\n+\\n+ if subparsers is not None:\\n+ parser.set_defaults(func=config_command)\\n+ return parser\\n+\\n+\\n+def config_command(args):\\n+ config = get_user_input()\\n+ if args.config_file is not None:\\n+ config_file = args.config_file\\n+ else:\\n+ if not os.path.isdir(cache_dir):\\n+ os.makedirs(cache_dir)\\n+ config_file = default_yaml_config_file\\n+\\n+ if config_file.endswith(\".json\"):\\n+ config.to_json_file(config_file)\\n+ else:\\n+ config.to_yaml_file(config_file)\\n+\\n+\\n+def main():\\n+ parser = config_command_parser()\\n+ args = parser.parse_args()\\n+ config_command(args)\\n+\\n+\\n+if __name__ == \"__main__\":\\n+ main()\\ndiff --git a/src/accelerate/commands/config/config_utils.py b/src/accelerate/commands/config/config_utils.py\\nindex def29fe06..1deee9f32 100644\\n--- a/src/accelerate/commands/config/config_utils.py\\n+++ b/src/accelerate/commands/config/config_utils.py\\n@@ -14,6 +14,8 @@\\n # See the License for the specific language governing permissions and\\n # limitations under the License.\\n \\n+import argparse\\n+\\n from ...utils.dataclasses import (\\n ComputeEnvironment,\\n DistributedType,\\n@@ -84,3 +86,32 @@ def _convert_sagemaker_distributed_mode(value):\\n \\n def _convert_yes_no_to_bool(value):\\n return {\"yes\": True, \"no\": False}[value.lower()]\\n+\\n+\\n+class GroupedAction(argparse.Action):\\n+ \"\"\"\\n+ Filters arguments into seperate namespace groups based on the first part of the argument name.\\n+ \"\"\"\\n+\\n+ def __call__(self, parser, namespace, values, option_string=None):\\n+ group, dest = self.dest.split(\".\", 2)\\n+ groupspace = getattr(namespace, group, argparse.Namespace())\\n+ setattr(groupspace, dest, values)\\n+ setattr(namespace, group, groupspace)\\n+\\n+\\n+class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):\\n+ \"\"\"\\n+ A custom formatter that will remove the usage line from the help message for subcommands.\\n+ \"\"\"\\n+\\n+ def _format_action(self, action):\\n+ parts = super()._format_action(action)\\n+ if action.nargs == argparse.PARSER:\\n+ parts = \"\\\\n\".join(parts.split(\"\\\\n\")[1:])\\n+ return parts\\n+\\n+ def _format_usage(self, usage, actions, groups, 
prefix):\\n+ usage = super()._format_usage(usage, actions, groups, prefix)\\n+ usage = usage.replace(\" [] \", \"\")\\n+ return usage\\ndiff --git a/src/accelerate/commands/config/default.py b/src/accelerate/commands/config/default.py\\nindex 956c3d719..b87a1d45b 100644\\n--- a/src/accelerate/commands/config/default.py\\n+++ b/src/accelerate/commands/config/default.py\\n@@ -15,21 +15,69 @@\\n # limitations under the License.\\n \\n import argparse\\n-\\n-from accelerate.utils import write_basic_config\\n-\\n-from .config_args import default_json_config_file\\n+from pathlib import Path\\n+\\n+import torch\\n+\\n+from .config_args import ClusterConfig, default_json_config_file\\n+from .config_utils import GroupedAction\\n+\\n+\\n+def write_basic_config(mixed_precision=\"no\", save_location: str = default_json_config_file, dynamo_backend=\"no\"):\\n+ \"\"\"\\n+ Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also\\n+ set CPU if it is a CPU-only machine.\\n+\\n+ Args:\\n+ mixed_precision (`str`, *optional*, defaults to \"no\"):\\n+ Mixed Precision to use. Should be one of \"no\", \"fp16\", or \"bf16\"\\n+ save_location (`str`, *optional*, defaults to `default_json_config_file`):\\n+ Optional custom save location. Should be passed to `--config_file` when using `accelerate launch`. Default\\n+ location is inside the huggingface cache folder (`~/.cache/huggingface`) but can be overriden by setting\\n+ the `HF_HOME` environmental variable, followed by `accelerate/default_config.yaml`.\\n+ \"\"\"\\n+ path = Path(save_location)\\n+ path.parent.mkdir(parents=True, exist_ok=True)\\n+ if path.exists():\\n+ print(\\n+ f\"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.\"\\n+ )\\n+ return\\n+ mixed_precision = mixed_precision.lower()\\n+ if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\\n+ raise ValueError(f\"`mixed_precision` should be one of \\'no\\', \\'fp16\\', or \\'bf16\\'. 
Received {mixed_precision}\")\\n+ config = {\\n+ \"compute_environment\": \"LOCAL_MACHINE\",\\n+ \"mixed_precision\": mixed_precision,\\n+ \"dynamo_backend\": dynamo_backend,\\n+ }\\n+ if torch.cuda.is_available():\\n+ num_gpus = torch.cuda.device_count()\\n+ config[\"num_processes\"] = num_gpus\\n+ config[\"use_cpu\"] = False\\n+ if num_gpus > 1:\\n+ config[\"distributed_type\"] = \"MULTI_GPU\"\\n+ else:\\n+ config[\"distributed_type\"] = \"NO\"\\n+ else:\\n+ num_gpus = 0\\n+ config[\"use_cpu\"] = True\\n+ config[\"num_processes\"] = 1\\n+ config[\"distributed_type\"] = \"NO\"\\n+ if not path.exists():\\n+ config = ClusterConfig(**config)\\n+ config.to_json_file(path)\\n \\n \\n description = \"Create a default config file for Accelerate with only a few flags set.\"\\n \\n \\n-def default_command_parser(subparsers=None):\\n- if subparsers is not None:\\n- parser = subparsers.add_parser(\"default-config\", description=description)\\n+def default_command_parser(parser=None, parents=None):\\n+ if parser is None and parents is None:\\n+ parser = argparse.ArgumentParser(description=description)\\n else:\\n- parser = argparse.ArgumentParser(\"Accelerate default-config command\", description=description)\\n-\\n+ default_parser = parser.add_subparsers(title=\"subcommand {default}\", dest=\"default\", description=description)\\n+ parser = default_parser.add_parser(\"default\", parents=parents)\\n parser.add_argument(\\n \"--config_file\",\\n default=default_json_config_file,\\n@@ -39,7 +87,9 @@ def default_command_parser(subparsers=None):\\n \"such an environment variable, your cache directory (\\'~/.cache\\' or the content of `XDG_CACHE_HOME`) suffixed \"\\n \"with \\'huggingface\\'.\"\\n ),\\n- dest=\"save_location\",\\n+ dest=\"default_args.save_location\",\\n+ metavar=\"CONFIG_FILE\",\\n+ action=GroupedAction,\\n )\\n \\n parser.add_argument(\\n@@ -50,24 +100,14 @@ def default_command_parser(subparsers=None):\\n \"Choose between FP16 and BF16 (bfloat16) training. 
\"\\n \"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\",\\n default=\"no\",\\n+ dest=\"default_args.mixed_precision\",\\n+ action=GroupedAction,\\n )\\n-\\n- if subparsers is not None:\\n- parser.set_defaults(func=config_command)\\n+ parser.set_defaults(func=default_config_command)\\n return parser\\n \\n \\n-def config_command(args):\\n+def default_config_command(args):\\n args = vars(args)\\n args.pop(\"func\", None)\\n write_basic_config(**args)\\n-\\n-\\n-def main():\\n- parser = default_command_parser()\\n- args = parser.parse_args()\\n- config_command(args)\\n-\\n-\\n-if __name__ == \"__main__\":\\n- main()\\ndiff --git a/src/accelerate/utils/other.py b/src/accelerate/utils/other.py\\nindex f1af373b6..a9b6f9d4b 100644\\n--- a/src/accelerate/utils/other.py\\n+++ b/src/accelerate/utils/other.py\\n@@ -14,12 +14,10 @@\\n \\n import os\\n from contextlib import contextmanager\\n-from pathlib import Path\\n \\n import torch\\n \\n-from ..commands.config.cluster import ClusterConfig\\n-from ..commands.config.config_args import default_json_config_file\\n+from ..commands.config.default import write_basic_config # noqa: F401\\n from ..state import AcceleratorState\\n from .dataclasses import DistributedType\\n from .imports import is_deepspeed_available, is_tpu_available\\n@@ -113,49 +111,3 @@ def get_pretty_name(obj):\\n if hasattr(obj, \"__name__\"):\\n return obj.__name__\\n return str(obj)\\n-\\n-\\n-def write_basic_config(mixed_precision=\"no\", save_location: str = default_json_config_file, dynamo_backend=\"no\"):\\n- \"\"\"\\n- Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also\\n- set CPU if it is a CPU-only machine.\\n-\\n- Args:\\n- mixed_precision (`str`, *optional*, defaults to \"no\"):\\n- Mixed Precision to use. Should be one of \"no\", \"fp16\", or \"bf16\"\\n- save_location (`str`, *optional*, defaults to `default_json_config_file`):\\n- Optional custom save location. Should be passed to `--config_file` when using `accelerate launch`. Default\\n- location is inside the huggingface cache folder (`~/.cache/huggingface`) but can be overriden by setting\\n- the `HF_HOME` environmental variable, followed by `accelerate/default_config.yaml`.\\n- \"\"\"\\n- path = Path(save_location)\\n- path.parent.mkdir(parents=True, exist_ok=True)\\n- if path.exists():\\n- print(\\n- f\"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.\"\\n- )\\n- return\\n- mixed_precision = mixed_precision.lower()\\n- if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\\n- raise ValueError(f\"`mixed_precision` should be one of \\'no\\', \\'fp16\\', or \\'bf16\\'. 
Received {mixed_precision}\")\\n- config = {\\n- \"compute_environment\": \"LOCAL_MACHINE\",\\n- \"mixed_precision\": mixed_precision,\\n- \"dynamo_backend\": dynamo_backend,\\n- }\\n- if torch.cuda.is_available():\\n- num_gpus = torch.cuda.device_count()\\n- config[\"num_processes\"] = num_gpus\\n- config[\"use_cpu\"] = False\\n- if num_gpus > 1:\\n- config[\"distributed_type\"] = \"MULTI_GPU\"\\n- else:\\n- config[\"distributed_type\"] = \"NO\"\\n- else:\\n- num_gpus = 0\\n- config[\"use_cpu\"] = True\\n- config[\"num_processes\"] = 1\\n- config[\"distributed_type\"] = \"NO\"\\n- if not path.exists():\\n- config = ClusterConfig(**config)\\n- config.to_json_file(path)\\n',\n", - " 'code_comments': [{'body': '```suggestion\\r\\n```',\n", - " 'diff_hunk': '@@ -15,73 +15,42 @@\\n # limitations under the License.\\n \\n import argparse\\n-import os\\n \\n-from accelerate.utils import ComputeEnvironment\\n+from .config import config_command, config_command_parser\\n+from .config_args import default_config_file, load_config_from_file # noqa: F401\\n+from .default import default_command_parser, default_config_command\\n \\n-from .cluster import get_cluster_input\\n-from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401\\n-from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401\\n-from .sagemaker import get_sagemaker_input\\n \\n+def filter_command_args(args: dict, args_prefix: str):\\n+ \"Filters args while only keeping ones that are prefixed with `{args_prefix}.`\"\\n+ new_args = argparse.Namespace()\\n+ for key, value in vars(args).items():\\n+ print(key, value)',\n", - " 'from_author': False},\n", - " {'body': 'is this required here anymore?',\n", - " 'diff_hunk': '@@ -14,12 +14,10 @@\\n \\n import os\\n from contextlib import contextmanager\\n-from pathlib import Path\\n \\n import torch\\n \\n-from ..commands.config.cluster import ClusterConfig\\n-from ..commands.config.config_args import default_json_config_file\\n+from ..commands.config.default import write_basic_config # noqa: F401',\n", - " 'from_author': False},\n", - " {'body': 'Yep, this is the nit about making sure we can keep `from accelerate.utils import write_basic_config` ',\n", - " 'diff_hunk': '@@ -14,12 +14,10 @@\\n \\n import os\\n from contextlib import contextmanager\\n-from pathlib import Path\\n \\n import torch\\n \\n-from ..commands.config.cluster import ClusterConfig\\n-from ..commands.config.config_args import default_json_config_file\\n+from ..commands.config.default import write_basic_config # noqa: F401',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/851'},\n", - " 1121455495: {'diff': 'diff --git a/src/accelerate/scheduler.py b/src/accelerate/scheduler.py\\nindex 835d4e0d9..2652a2105 100644\\n--- a/src/accelerate/scheduler.py\\n+++ b/src/accelerate/scheduler.py\\n@@ -69,8 +69,9 @@ def step(self, *args, **kwargs):\\n num_processes = AcceleratorState().num_processes\\n for _ in range(num_processes):\\n # Special case when using OneCycle and `drop_last` was not used\\n- if hasattr(self.scheduler, \"total_steps\") and self.scheduler._step_count <= self.scheduler.total_steps:\\n- self.scheduler.step(*args, **kwargs)\\n+ if hasattr(self.scheduler, \"total_steps\"):\\n+ if self.scheduler._step_count <= 
self.scheduler.total_steps:\\n+ self.scheduler.step(*args, **kwargs)\\n else:\\n self.scheduler.step(*args, **kwargs)\\n \\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/849'},\n", - " 1121430707: {'diff': 'diff --git a/examples/complete_cv_example.py b/examples/complete_cv_example.py\\nindex 880981594..bca55aa87 100644\\n--- a/examples/complete_cv_example.py\\n+++ b/examples/complete_cv_example.py\\n@@ -230,6 +230,7 @@ def training_function(config, args):\\n accelerator.save_state(output_dir)\\n model.eval()\\n accurate = 0\\n+ num_elems = 0\\n for step, batch in enumerate(eval_dataloader):\\n # We could avoid this line since we set the accelerator with `device_placement=True`.\\n batch = {k: v.to(accelerator.device) for k, v in batch.items()}\\n@@ -239,9 +240,10 @@ def training_function(config, args):\\n predictions = outputs.argmax(dim=-1)\\n predictions, references = accelerator.gather_for_metrics((predictions, batch[\"label\"]))\\n accurate_preds = predictions == references\\n+ num_elems += accurate_preds.shape[0]\\n accurate += accurate_preds.long().sum()\\n \\n- eval_metric = accurate.item() / accelerator.gradient_state.samples_seen\\n+ eval_metric = accurate.item() / num_elems\\n # Use accelerator.print to print only on the main process.\\n accelerator.print(f\"epoch {epoch}: {100 * eval_metric:.2f}\")\\n if args.with_tracking:\\n',\n", - " 'code_comments': [],\n", - " 'context': [],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/848'},\n", - " 1118288220: {'diff': 'diff --git a/docs/source/package_reference/logging.mdx b/docs/source/package_reference/logging.mdx\\nindex 675af41ee..85e844690 100644\\n--- a/docs/source/package_reference/logging.mdx\\n+++ b/docs/source/package_reference/logging.mdx\\n@@ -21,4 +21,14 @@ To utilize this replace cases of `logging` with `accelerate.logging`:\\n + logger = get_logger(__name__)\\n ```\\n \\n+## Setting the log level\\n+\\n+The log level can be set with the `ACCELERATE_LOG_LEVEL` environment variable or by passing \\n+`log_level` to `get_logger`:\\n+```python\\n+from accelerate.logging import get_logger\\n+\\n+logger = get_logger(__name__, log_level=\"INFO\")\\n+```\\n+\\n [[autodoc]] logging.get_logger\\n\\\\ No newline at end of file\\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex f595c08cc..7a9ac92f4 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -207,11 +207,6 @@ def __init__(\\n dynamo_backend: Union[DynamoBackend, str] = None,\\n ):\\n self.logging_dir = logging_dir\\n- trackers = filter_trackers(log_with, self.logging_dir)\\n- if len(trackers) < 1 and log_with is not None:\\n- warnings.warn(f\"`log_with={log_with}` was passed but no supported trackers are currently installed.\")\\n- self.log_with = trackers\\n-\\n if mixed_precision is not None:\\n mixed_precision = str(mixed_precision)\\n if mixed_precision not in PrecisionType:\\n@@ -303,6 +298,11 @@ def __init__(\\n **kwargs,\\n )\\n \\n+ trackers = filter_trackers(log_with, self.logging_dir)\\n+ if len(trackers) < 1 and log_with is not None:\\n+ warnings.warn(f\"`log_with={log_with}` was passed but no supported trackers are currently installed.\")\\n+ self.log_with = trackers\\n+\\n if (\\n (mixed_precision != \"bf16\")\\n and getattr(self.state, \"downcast_bfloat\", 
False)\\ndiff --git a/src/accelerate/logging.py b/src/accelerate/logging.py\\nindex dca093215..46118f7c6 100644\\n--- a/src/accelerate/logging.py\\n+++ b/src/accelerate/logging.py\\n@@ -13,6 +13,7 @@\\n # limitations under the License.\\n \\n import logging\\n+import os\\n \\n from .state import AcceleratorState\\n from .utils import DistributedType\\n@@ -49,7 +50,7 @@ def log(self, level, msg, *args, **kwargs):\\n self.logger.log(level, msg, *args, **kwargs)\\n \\n \\n-def get_logger(name: str):\\n+def get_logger(name: str, log_level: str = None):\\n \"\"\"\\n Returns a `logging.Logger` for `name` that can handle multiprocessing.\\n \\n@@ -58,6 +59,8 @@ def get_logger(name: str):\\n Args:\\n name (`str`):\\n The name for the logger, such as `__file__`\\n+ log_level (`str`, *optional*):\\n+ The log level to use. If not passed, will default to the `LOG_LEVEL` environment variable, or `INFO` if not\\n \\n Example:\\n \\n@@ -68,7 +71,14 @@ def get_logger(name: str):\\n \\n >>> logger.info(\"My log\", main_process_only=False)\\n >>> logger.debug(\"My log\", main_process_only=True)\\n+\\n+ >>> logger = get_logger(__name__, accelerate_log_level=\"DEBUG\")\\n+ >>> logger.info(\"My log\")\\n+ >>> logger.debug(\"My second log\")\\n ```\\n \"\"\"\\n+ if log_level is None:\\n+ log_level = os.environ.get(\"ACCELERATE_LOG_LEVEL\", \"INFO\")\\n logger = logging.getLogger(name)\\n+ logging.basicConfig(level=log_level.upper())\\n return MultiProcessAdapter(logger, {})\\ndiff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\\nindex 3a31fed93..689ee5033 100644\\n--- a/src/accelerate/tracking.py\\n+++ b/src/accelerate/tracking.py\\n@@ -577,7 +577,6 @@ def filter_trackers(\\n if log_with is not None:\\n if not isinstance(log_with, (list, tuple)):\\n log_with = [log_with]\\n- logger.debug(f\"{log_with}\")\\n if \"all\" in log_with or LoggerType.ALL in log_with:\\n loggers = [o for o in log_with if issubclass(type(o), GeneralTracker)] + get_available_trackers()\\n else:\\n',\n", - " 'code_comments': [{'body': 'Is this a standard name that comes from `logging`? Should we also look for an `ACCELERATE_LOG_LEVEL` if users want to use a different one from the standard one? (So priority would be to look for the accelerate log level, then the general one).',\n", - " 'diff_hunk': '@@ -21,4 +21,14 @@ To utilize this replace cases of `logging` with `accelerate.logging`:\\n + logger = get_logger(__name__)\\n ```\\n \\n+## Setting the log level\\n+\\n+The log level can be set with the `LOG_LEVEL` environment variable or by passing ',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\n log_level (`str`, *optional*):\\r\\n```\\r\\nMake sure to re-read the doc-writing guide ;-)',\n", - " 'diff_hunk': '@@ -58,6 +59,8 @@ def get_logger(name: str):\\n Args:\\n name (`str`):\\n The name for the logger, such as `__file__`\\n+ log_level (`str`, `optional`, defaults to `None`):',\n", - " 'from_author': False},\n", - " {'body': \"There is not, generally I've seen it as `LOGLEVEL` for the recommended usage but then it's advised to pull it in from `os.environ`. 
However you're right having something Accelerate specific would be much better so I will opt for `ACCELERATE_LOG_LEVEL` instead\",\n", - " 'diff_hunk': '@@ -21,4 +21,14 @@ To utilize this replace cases of `logging` with `accelerate.logging`:\\n + logger = get_logger(__name__)\\n ```\\n \\n+## Setting the log level\\n+\\n+The log level can be set with the `LOG_LEVEL` environment variable or by passing ',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/842'},\n", - " 1118231769: {'diff': 'diff --git a/docs/source/package_reference/cli.mdx b/docs/source/package_reference/cli.mdx\\nindex 9643d3b4f..562e1f4a4 100644\\n--- a/docs/source/package_reference/cli.mdx\\n+++ b/docs/source/package_reference/cli.mdx\\n@@ -35,6 +35,28 @@ accelerate config [arguments]\\n (`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.\\n * `-h`, `--help` (`bool`) -- Show a help message and exit\\n \\n+## accelerate default-config \\n+\\n+**Command**:\\n+\\n+`accelerate default-config` or `accelerate-default-config`\\n+\\n+Create a default config file for Accelerate with only a few flags set.\\n+\\n+**Usage**: \\n+\\n+```bash\\n+accelerate default-config [arguments]\\n+```\\n+\\n+**Optional Arguments**:\\n+* `--config_file CONFIG_FILE` (`str`) -- The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content\\n+ of the environment `HF_HOME` suffixed with \\'accelerate\\', or if you don\\'t have such an environment variable, your cache directory\\n+ (`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.\\n+\\n+* `-h`, `--help` (`bool`) -- Show a help message and exit\\n+* `--mixed_precision {no,fp16,bf16}` (`str`) -- Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. 
BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\\n+\\n ## accelerate env\\n \\n **Command**:\\ndiff --git a/setup.py b/setup.py\\nindex f96ebc440..622d6de69 100644\\n--- a/setup.py\\n+++ b/setup.py\\n@@ -48,6 +48,7 @@\\n \"accelerate=accelerate.commands.accelerate_cli:main\",\\n \"accelerate-config=accelerate.commands.config:main\",\\n \"accelerate-launch=accelerate.commands.launch:main\",\\n+ \"accelerate-default-config=accelerate.commands.config.default:main\",\\n ]\\n },\\n python_requires=\">=3.7.0\",\\ndiff --git a/src/accelerate/commands/accelerate_cli.py b/src/accelerate/commands/accelerate_cli.py\\nindex f0e76fd2c..8ffda3572 100644\\n--- a/src/accelerate/commands/accelerate_cli.py\\n+++ b/src/accelerate/commands/accelerate_cli.py\\n@@ -17,6 +17,7 @@\\n from argparse import ArgumentParser\\n \\n from accelerate.commands.config import config_command_parser\\n+from accelerate.commands.config.default import default_command_parser\\n from accelerate.commands.env import env_command_parser\\n from accelerate.commands.launch import launch_command_parser\\n from accelerate.commands.test import test_command_parser\\n@@ -29,6 +30,7 @@ def main():\\n \\n # Register commands\\n config_command_parser(subparsers=subparsers)\\n+ default_command_parser(subparsers=subparsers)\\n env_command_parser(subparsers=subparsers)\\n launch_command_parser(subparsers=subparsers)\\n tpu_command_parser(subparsers=subparsers)\\ndiff --git a/src/accelerate/commands/config/__init__.py b/src/accelerate/commands/config/__init__.py\\nindex 4b02ac40c..2400555c4 100644\\n--- a/src/accelerate/commands/config/__init__.py\\n+++ b/src/accelerate/commands/config/__init__.py\\n@@ -25,6 +25,9 @@\\n from .sagemaker import get_sagemaker_input\\n \\n \\n+description = \"Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine\"\\n+\\n+\\n def get_user_input():\\n compute_environment = _ask_options(\\n \"In which compute environment are you running?\",\\n@@ -40,9 +43,9 @@ def get_user_input():\\n \\n def config_command_parser(subparsers=None):\\n if subparsers is not None:\\n- parser = subparsers.add_parser(\"config\")\\n+ parser = subparsers.add_parser(\"config\", description=description)\\n else:\\n- parser = argparse.ArgumentParser(\"Accelerate config command\")\\n+ parser = argparse.ArgumentParser(\"Accelerate config command\", description=description)\\n \\n parser.add_argument(\\n \"--config_file\",\\ndiff --git a/src/accelerate/commands/config/default.py b/src/accelerate/commands/config/default.py\\nnew file mode 100644\\nindex 000000000..956c3d719\\n--- /dev/null\\n+++ b/src/accelerate/commands/config/default.py\\n@@ -0,0 +1,73 @@\\n+#!/usr/bin/env python\\n+\\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+import argparse\\n+\\n+from accelerate.utils import write_basic_config\\n+\\n+from .config_args import default_json_config_file\\n+\\n+\\n+description = \"Create a default config file for Accelerate with only a few flags set.\"\\n+\\n+\\n+def default_command_parser(subparsers=None):\\n+ if subparsers is not None:\\n+ parser = subparsers.add_parser(\"default-config\", description=description)\\n+ else:\\n+ parser = argparse.ArgumentParser(\"Accelerate default-config command\", description=description)\\n+\\n+ parser.add_argument(\\n+ \"--config_file\",\\n+ default=default_json_config_file,\\n+ help=(\\n+ \"The path to use to store the config file. Will default to a file named default_config.yaml in the cache \"\\n+ \"location, which is the content of the environment `HF_HOME` suffixed with \\'accelerate\\', or if you don\\'t have \"\\n+ \"such an environment variable, your cache directory (\\'~/.cache\\' or the content of `XDG_CACHE_HOME`) suffixed \"\\n+ \"with \\'huggingface\\'.\"\\n+ ),\\n+ dest=\"save_location\",\\n+ )\\n+\\n+ parser.add_argument(\\n+ \"--mixed_precision\",\\n+ choices=[\"no\", \"fp16\", \"bf16\"],\\n+ type=str,\\n+ help=\"Whether or not to use mixed precision training. \"\\n+ \"Choose between FP16 and BF16 (bfloat16) training. \"\\n+ \"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\",\\n+ default=\"no\",\\n+ )\\n+\\n+ if subparsers is not None:\\n+ parser.set_defaults(func=config_command)\\n+ return parser\\n+\\n+\\n+def config_command(args):\\n+ args = vars(args)\\n+ args.pop(\"func\", None)\\n+ write_basic_config(**args)\\n+\\n+\\n+def main():\\n+ parser = default_command_parser()\\n+ args = parser.parse_args()\\n+ config_command(args)\\n+\\n+\\n+if __name__ == \"__main__\":\\n+ main()\\n',\n", - " 'code_comments': [{'body': \"I wouldn't include this in the default.\",\n", - " 'diff_hunk': '@@ -0,0 +1,82 @@\\n+#!/usr/bin/env python\\n+\\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+import argparse\\n+\\n+from accelerate.utils import write_basic_config\\n+\\n+from .config_utils import DYNAMO_BACKENDS\\n+\\n+\\n+description = \"Create a default config file for Accelerate with only a few flags set.\"\\n+\\n+\\n+def default_command_parser(subparsers=None):\\n+ if subparsers is not None:\\n+ parser = subparsers.add_parser(\"default-config\", description=description)\\n+ else:\\n+ parser = argparse.ArgumentParser(\"Accelerate default-config command\", description=description)\\n+\\n+ parser.add_argument(\\n+ \"--config_file\",\\n+ default=None,\\n+ help=(\\n+ \"The path to use to store the config file. Will default to a file named default_config.yaml in the cache \"\\n+ \"location, which is the content of the environment `HF_HOME` suffixed with \\'accelerate\\', or if you don\\'t have \"\\n+ \"such an environment variable, your cache directory (\\'~/.cache\\' or the content of `XDG_CACHE_HOME`) suffixed \"\\n+ \"with \\'huggingface\\'.\"\\n+ ),\\n+ dest=\"save_location\",\\n+ )\\n+\\n+ parser.add_argument(\\n+ \"--mixed_precision\",\\n+ choices=[\"no\", \"fp16\", \"bf16\"],\\n+ type=str,\\n+ help=\"Whether or not to use mixed precision training. \"\\n+ \"Choose between FP16 and BF16 (bfloat16) training. \"\\n+ \"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\",\\n+ default=\"no\",\\n+ )\\n+\\n+ parser.add_argument(\\n+ \"--dynamo_backend\",\\n+ type=str,\\n+ choices=[\"no\"] + [b.lower() for b in DYNAMO_BACKENDS],\\n+ help=\"Choose a backend to optimize your training with dynamo, see more at \"\\n+ \"https://github.com/pytorch/torchdynamo.\",\\n+ default=\"no\",\\n+ )\\n+\\n+ if subparsers is not None:\\n+ parser.set_defaults(func=config_command)\\n+ return parser',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/840'},\n", - " 1117850136: {'diff': 'diff --git a/src/accelerate/commands/menu/helpers.py b/src/accelerate/commands/menu/helpers.py\\nindex 2cc2ece3f..687f05c50 100644\\n--- a/src/accelerate/commands/menu/helpers.py\\n+++ b/src/accelerate/commands/menu/helpers.py\\n@@ -37,6 +37,10 @@ def forceWrite(content, end=\"\"):\\n sys.stdout.flush()\\n \\n \\n+def writeColor(content, color, end=\"\"):\\n+ forceWrite(f\"\\\\u001b[{color}m{content}\\\\u001b[0m\", end)\\n+\\n+\\n def reset_cursor():\\n forceWrite(\"\\\\r\")\\n \\ndiff --git a/src/accelerate/commands/menu/selection_menu.py b/src/accelerate/commands/menu/selection_menu.py\\nindex d4af34c8f..751f6818a 100644\\n--- a/src/accelerate/commands/menu/selection_menu.py\\n+++ b/src/accelerate/commands/menu/selection_menu.py\\n@@ -16,7 +16,7 @@\\n Main driver for the selection menu, based on https://github.com/bchao1/bullet\\n \"\"\"\\n from . 
import cursor, input\\n-from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor\\n+from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor\\n from .keymap import KEYMAP\\n \\n \\n@@ -34,7 +34,8 @@ def __init__(self, prompt: str = None, choices: list = []):\\n def print_choice(self, index: int):\\n \"Prints the choice at the given index\"\\n if index == self.position:\\n- forceWrite(f\" ➔ {self.choices[index]}\")\\n+ forceWrite(\" ➔ \")\\n+ writeColor(self.choices[index], 32)\\n else:\\n forceWrite(f\" {self.choices[index]}\")\\n reset_cursor()\\n@@ -108,5 +109,6 @@ def run(self, default_choice: int = 0):\\n for _ in range(len(self.choices) + 1):\\n move_cursor(1, \"UP\")\\n clear_line()\\n- forceWrite(f\" * {self.choices[choice]}\", \"\\\\n\")\\n+ forceWrite(\" ➔ \")\\n+ writeColor(self.choices[choice], 32, \"\\\\n\")\\n return choice\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/839'},\n", - " 1117172405: {'diff': 'diff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\\nindex 15996bfd0..a653b9d6a 100644\\n--- a/src/accelerate/commands/config/cluster.py\\n+++ b/src/accelerate/commands/config/cluster.py\\n@@ -164,14 +164,13 @@ def get_cluster_input():\\n default=2,\\n )\\n \\n+ deepspeed_devices = [\"none\", \"cpu\", \"nvme\"]\\n if deepspeed_config[\"zero_stage\"] >= 2:\\n deepspeed_config[\"offload_optimizer_device\"] = _ask_options(\\n- \"Where to offload optimizer states?\",\\n- [\"none\", \"cpu\", \"nvme\"],\\n+ \"Where to offload optimizer states?\", deepspeed_devices, lambda x: deepspeed_devices[int(x)]\\n )\\n deepspeed_config[\"offload_param_device\"] = _ask_options(\\n- \"Where to offload parameters?\",\\n- [\"none\", \"cpu\", \"nvme\"],\\n+ \"Where to offload parameters?\", deepspeed_devices, lambda x: deepspeed_devices[int(x)]\\n )\\n deepspeed_config[\"gradient_accumulation_steps\"] = _ask_field(\\n \"How many gradient accumulation steps you\\'re passing in your script? [1]: \",\\n@@ -294,7 +293,6 @@ def get_cluster_input():\\n default=1e8,\\n )\\n fsdp_backward_prefetch_query = \"What should be your FSDP\\'s backward prefetch policy?\"\\n- fsdp_backward_prefetch_query = fsdp_backward_prefetch_query[:-2] + \")? [0]: \"\\n fsdp_config[\"fsdp_backward_prefetch_policy\"] = _ask_options(\\n fsdp_backward_prefetch_query,\\n FSDP_BACKWARD_PREFETCH,\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_836). 
All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/836'},\n", - " 1115322323: {'diff': 'diff --git a/src/accelerate/commands/config/__init__.py b/src/accelerate/commands/config/__init__.py\\nindex 11719835a..4b02ac40c 100644\\n--- a/src/accelerate/commands/config/__init__.py\\n+++ b/src/accelerate/commands/config/__init__.py\\n@@ -21,15 +21,15 @@\\n \\n from .cluster import get_cluster_input\\n from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401\\n-from .config_utils import _ask_field, _convert_compute_environment\\n+from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401\\n from .sagemaker import get_sagemaker_input\\n \\n \\n def get_user_input():\\n- compute_environment = _ask_field(\\n- \"In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): \",\\n+ compute_environment = _ask_options(\\n+ \"In which compute environment are you running?\",\\n+ [\"This machine\", \"AWS (Amazon SageMaker)\"],\\n _convert_compute_environment,\\n- error_message=\"Please enter 0 or 1\",\\n )\\n if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:\\n config = get_sagemaker_input()\\ndiff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\\nindex 5f032debc..15996bfd0 100644\\n--- a/src/accelerate/commands/config/cluster.py\\n+++ b/src/accelerate/commands/config/cluster.py\\n@@ -31,14 +31,21 @@\\n FSDP_STATE_DICT_TYPE,\\n )\\n from .config_args import ClusterConfig\\n-from .config_utils import _ask_field, _convert_distributed_mode, _convert_dynamo_backend, _convert_yes_no_to_bool\\n+from .config_utils import (\\n+ _ask_field,\\n+ _ask_options,\\n+ _convert_distributed_mode,\\n+ _convert_dynamo_backend,\\n+ _convert_mixed_precision,\\n+ _convert_yes_no_to_bool,\\n+)\\n \\n \\n def get_cluster_input():\\n- distributed_type = _ask_field(\\n- \"Which type of machine are you using? ([0] No distributed training, [1] multi-CPU, [2] multi-GPU, [3] TPU [4] MPS): \",\\n+ distributed_type = _ask_options(\\n+ \"Which type of machine are you using?\",\\n+ [\"No distributed training\", \"multi-CPU\", \"multi-GPU\", \"TPU\", \"MPS\"],\\n _convert_distributed_mode,\\n- error_message=\"Please enter 0, 1, 2, 3 or 4.\",\\n )\\n \\n machine_rank = 0\\n@@ -60,10 +67,10 @@ def get_cluster_input():\\n default=1,\\n )\\n if num_machines > 1:\\n- machine_rank = _ask_field(\\n- \"What is the rank of this machine (from 0 to the number of machines - 1 )? [0]: \",\\n+ machine_rank = _ask_options(\\n+ \"What is the rank of this machine?\",\\n+ list(range(num_machines)),\\n lambda x: int(x),\\n- default=0,\\n )\\n main_process_ip = _ask_field(\\n \"What is the IP address of the machine that will host the main process? \",\\n@@ -102,11 +109,22 @@ def get_cluster_input():\\n error_message=\"Please enter yes or no.\",\\n )\\n if use_dynamo:\\n- dynamo_backend = _ask_field(\\n- \"Which dynamo backend would you like to use? 
([0] eager, [1] aot_eager, [2] inductor, [3] nvfuser, [5] aot_nvfuser, [6] aot_cudagraphs, [7] ofi, [8] onnxrt, [9] ipex) [2]: \",\\n+ dynamo_backend = _ask_options(\\n+ \"Which dynamo backend would you like to use?\",\\n+ [\\n+ \"eager\",\\n+ \"aot_eager\",\\n+ \"inductor\",\\n+ \"nvfuser\",\\n+ \"aot_nvfuser\",\\n+ \"aot_cudagraphs\",\\n+ \"ofi\",\\n+ \"fx2trt\",\\n+ \"onnxrt\",\\n+ \"ipex\",\\n+ ],\\n _convert_dynamo_backend,\\n- default=DynamoBackend.INDUCTOR,\\n- error_message=\"Please enter 0, 1, 2, 3, 4, 5, 6, 7, 8 or 9.\",\\n+ default=2,\\n )\\n else:\\n dynamo_backend = DynamoBackend.NO\\n@@ -139,22 +157,21 @@ def get_cluster_input():\\n default=\"none\",\\n )\\n else:\\n- deepspeed_config[\"zero_stage\"] = _ask_field(\\n- \"What should be your DeepSpeed\\'s ZeRO optimization stage (0, 1, 2, 3)? [2]: \",\\n+ deepspeed_config[\"zero_stage\"] = _ask_options(\\n+ \"What should be your DeepSpeed\\'s ZeRO optimization stage?\",\\n+ [0, 1, 2, 3],\\n lambda x: int(x),\\n default=2,\\n )\\n \\n if deepspeed_config[\"zero_stage\"] >= 2:\\n- deepspeed_config[\"offload_optimizer_device\"] = _ask_field(\\n- \"Where to offload optimizer states? [none/cpu/nvme]: \",\\n- lambda x: str(x),\\n- default=\"none\",\\n+ deepspeed_config[\"offload_optimizer_device\"] = _ask_options(\\n+ \"Where to offload optimizer states?\",\\n+ [\"none\", \"cpu\", \"nvme\"],\\n )\\n- deepspeed_config[\"offload_param_device\"] = _ask_field(\\n- \"Where to offload parameters? [none/cpu/nvme]: \",\\n- lambda x: str(x),\\n- default=\"none\",\\n+ deepspeed_config[\"offload_param_device\"] = _ask_options(\\n+ \"Where to offload parameters?\",\\n+ [\"none\", \"cpu\", \"nvme\"],\\n )\\n deepspeed_config[\"gradient_accumulation_steps\"] = _ask_field(\\n \"How many gradient accumulation steps you\\'re passing in your script? [1]: \",\\n@@ -194,14 +211,11 @@ def get_cluster_input():\\n )\\n \\n if num_machines > 1:\\n- launcher_query = \"Which Type of launcher do you want to use \"\\n- for i, launcher in enumerate(DEEPSPEED_MULTINODE_LAUNCHERS):\\n- launcher_query += f\"[{i}] {launcher}, \"\\n- launcher_query = launcher_query[:-2] + \")? [0]: \"\\n- deepspeed_config[\"deepspeed_multinode_launcher\"] = _ask_field(\\n+ launcher_query = \"Which Type of launcher do you want to use?\"\\n+ deepspeed_config[\"deepspeed_multinode_launcher\"] = _ask_options(\\n launcher_query,\\n+ DEEPSPEED_MULTINODE_LAUNCHERS,\\n lambda x: DEEPSPEED_MULTINODE_LAUNCHERS[int(x)],\\n- default=DEEPSPEED_MULTINODE_LAUNCHERS[0],\\n )\\n \\n if deepspeed_config[\"deepspeed_multinode_launcher\"] != DEEPSPEED_MULTINODE_LAUNCHERS[1]:\\n@@ -249,13 +263,11 @@ def get_cluster_input():\\n if use_fsdp:\\n distributed_type = DistributedType.FSDP\\n if distributed_type == DistributedType.FSDP:\\n- sharding_strategy_query = \"What should be your sharding strategy (\"\\n- for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):\\n- sharding_strategy_query += f\"[{i+1}] {strategy}, \"\\n- sharding_strategy_query = sharding_strategy_query[:-2] + \")? 
[1]: \"\\n- fsdp_config[\"fsdp_sharding_strategy\"] = _ask_field(\\n+ sharding_strategy_query = \"What should be your sharding strategy?\"\\n+ fsdp_config[\"fsdp_sharding_strategy\"] = _ask_options(\\n sharding_strategy_query,\\n- lambda x: int(x),\\n+ FSDP_SHARDING_STRATEGY,\\n+ lambda x: int(x) + 1,\\n default=1,\\n )\\n fsdp_config[\"fsdp_offload_params\"] = _ask_field(\\n@@ -264,14 +276,11 @@ def get_cluster_input():\\n default=False,\\n error_message=\"Please enter yes or no.\",\\n )\\n- fsdp_wrap_query = \"What should be your auto wrap policy (\"\\n- for i, wrap_policy in enumerate(FSDP_AUTO_WRAP_POLICY):\\n- fsdp_wrap_query += f\"[{i}] {wrap_policy}, \"\\n- fsdp_wrap_query = fsdp_wrap_query[:-2] + \")? [0]: \"\\n- fsdp_config[\"fsdp_auto_wrap_policy\"] = _ask_field(\\n+ fsdp_wrap_query = \"What should be your auto wrap policy?\"\\n+ fsdp_config[\"fsdp_auto_wrap_policy\"] = _ask_options(\\n fsdp_wrap_query,\\n+ FSDP_AUTO_WRAP_POLICY,\\n lambda x: FSDP_AUTO_WRAP_POLICY[int(x)],\\n- default=\"TRANSFORMER_BASED_WRAP\",\\n )\\n if fsdp_config[\"fsdp_auto_wrap_policy\"] == FSDP_AUTO_WRAP_POLICY[0]:\\n fsdp_config[\"fsdp_transformer_layer_cls_to_wrap\"] = _ask_field(\\n@@ -284,23 +293,18 @@ def get_cluster_input():\\n lambda x: int(x),\\n default=1e8,\\n )\\n- fsdp_backward_prefetch_query = \"What should be your FSDP\\'s backward prefetch policy (\"\\n- for i, backward_prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):\\n- fsdp_backward_prefetch_query += f\"[{i}] {backward_prefetch_policy}, \"\\n+ fsdp_backward_prefetch_query = \"What should be your FSDP\\'s backward prefetch policy?\"\\n fsdp_backward_prefetch_query = fsdp_backward_prefetch_query[:-2] + \")? [0]: \"\\n- fsdp_config[\"fsdp_backward_prefetch_policy\"] = _ask_field(\\n+ fsdp_config[\"fsdp_backward_prefetch_policy\"] = _ask_options(\\n fsdp_backward_prefetch_query,\\n+ FSDP_BACKWARD_PREFETCH,\\n lambda x: FSDP_BACKWARD_PREFETCH[int(x)],\\n- default=\"BACKWARD_PRE\",\\n )\\n- fsdp_state_dict_type_query = \"What should be your FSDP\\'s state dict type (\"\\n- for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):\\n- fsdp_state_dict_type_query += f\"[{i}] {state_dict_type}, \"\\n- fsdp_state_dict_type_query = fsdp_state_dict_type_query[:-2] + \")? [0]: \"\\n- fsdp_config[\"fsdp_state_dict_type\"] = _ask_field(\\n+ fsdp_state_dict_type_query = \"What should be your FSDP\\'s state dict type?\"\\n+ fsdp_config[\"fsdp_state_dict_type\"] = _ask_options(\\n fsdp_state_dict_type_query,\\n+ FSDP_STATE_DICT_TYPE,\\n lambda x: FSDP_STATE_DICT_TYPE[int(x)],\\n- default=\"FULL_STATE_DICT\",\\n )\\n \\n megatron_lm_config = {}\\n@@ -460,10 +464,10 @@ def get_cluster_input():\\n if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config:\\n mixed_precision = \"no\"\\n else:\\n- mixed_precision = _ask_field(\\n- \"Do you wish to use FP16 or BF16 (mixed precision)? 
[NO/fp16/bf16]: \",\\n- lambda x: str(x).lower(),\\n- default=\"no\",\\n+ mixed_precision = _ask_options(\\n+ \"Do you wish to use FP16 or BF16 (mixed precision)?\",\\n+ [\"no\", \"fp16\", \"bf16\"],\\n+ _convert_mixed_precision,\\n )\\n else:\\n mixed_precision = \"no\"\\ndiff --git a/src/accelerate/commands/config/config_utils.py b/src/accelerate/commands/config/config_utils.py\\nindex 8974cddc9..def29fe06 100644\\n--- a/src/accelerate/commands/config/config_utils.py\\n+++ b/src/accelerate/commands/config/config_utils.py\\n@@ -14,7 +14,14 @@\\n # See the License for the specific language governing permissions and\\n # limitations under the License.\\n \\n-from ...utils.dataclasses import ComputeEnvironment, DistributedType, DynamoBackend, SageMakerDistributedType\\n+from ...utils.dataclasses import (\\n+ ComputeEnvironment,\\n+ DistributedType,\\n+ DynamoBackend,\\n+ PrecisionType,\\n+ SageMakerDistributedType,\\n+)\\n+from ..menu import BulletMenu\\n \\n \\n DYNAMO_BACKENDS = [\\n@@ -44,6 +51,12 @@ def _ask_field(input_text, convert_value=None, default=None, error_message=None)\\n print(error_message)\\n \\n \\n+def _ask_options(input_text, options=[], convert_value=None, default=0):\\n+ menu = BulletMenu(input_text, options)\\n+ result = menu.run(default_choice=default)\\n+ return convert_value(result) if convert_value is not None else result\\n+\\n+\\n def _convert_compute_environment(value):\\n value = int(value)\\n return ComputeEnvironment([\"LOCAL_MACHINE\", \"AMAZON_SAGEMAKER\"][value])\\n@@ -56,7 +69,12 @@ def _convert_distributed_mode(value):\\n \\n def _convert_dynamo_backend(value):\\n value = int(value)\\n- return DynamoBackend(DYNAMO_BACKENDS[value + 1])\\n+ return DynamoBackend(DYNAMO_BACKENDS[value])\\n+\\n+\\n+def _convert_mixed_precision(value):\\n+ value = int(value)\\n+ return PrecisionType([\"no\", \"fp16\", \"bf16\"][value])\\n \\n \\n def _convert_sagemaker_distributed_mode(value):\\ndiff --git a/src/accelerate/commands/config/sagemaker.py b/src/accelerate/commands/config/sagemaker.py\\nindex 71de4fea9..22264b41f 100644\\n--- a/src/accelerate/commands/config/sagemaker.py\\n+++ b/src/accelerate/commands/config/sagemaker.py\\n@@ -22,7 +22,9 @@\\n from .config_args import SageMakerConfig\\n from .config_utils import (\\n _ask_field,\\n+ _ask_options,\\n _convert_dynamo_backend,\\n+ _convert_mixed_precision,\\n _convert_sagemaker_distributed_mode,\\n _convert_yes_no_to_bool,\\n )\\n@@ -92,8 +94,9 @@ def _get_iam_role_arn(role_name):\\n \\n \\n def get_sagemaker_input():\\n- credentials_configuration = _ask_field(\\n- \"How do you want to authorize? ([0] AWS Profile, [1] Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)): \",\\n+ credentials_configuration = _ask_options(\\n+ \"How do you want to authorize?\",\\n+ [\"AWS Profile\", \"Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) \"],\\n lambda x: int(x),\\n )\\n aws_profile = None\\n@@ -114,8 +117,9 @@ def get_sagemaker_input():\\n aws_region = _ask_field(\"Enter your AWS Region: [us-east-1]\", default=\"us-east-1\")\\n os.environ[\"AWS_DEFAULT_REGION\"] = aws_region\\n \\n- role_management = _ask_field(\\n- \"Do you already have an IAM Role for executing Amazon SageMaker Training Jobs? 
([0] provide IAM Role name, [1] create new IAM role using credentials: \",\\n+ role_management = _ask_options(\\n+ \"Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?\",\\n+ [\"Provide IAM Role name\", \"Create new IAM role using credentials\"],\\n lambda x: int(x),\\n )\\n if role_management == 0:\\n@@ -161,12 +165,11 @@ def get_sagemaker_input():\\n lambda x: str(x).lower(),\\n )\\n \\n- distributed_type = _ask_field(\\n- \"What is the distributed mode? ([0] No distributed training, [1] data parallelism): \",\\n+ distributed_type = _ask_options(\\n+ \"What is the distributed mode?\",\\n+ [\"No distributed training\", \"Data parallelism\"],\\n _convert_sagemaker_distributed_mode,\\n- error_message=\"Please enter 0 or 1\",\\n )\\n-\\n use_dynamo = _ask_field(\\n \"Do you wish to optimize your script with torch dynamo?[yes/NO]:\",\\n _convert_yes_no_to_bool,\\n@@ -174,22 +177,30 @@ def get_sagemaker_input():\\n error_message=\"Please enter yes or no.\",\\n )\\n if use_dynamo:\\n- dynamo_backend = _ask_field(\\n- \"Which dynamo backend would you like to use? ([0] eager, [1] aot_eager, [2] inductor, [3] nvfuser, [5] aot_nvfuser, [6] aot_cudagraphs, [7] ofi, [8] onnxrt, [9] ipex) [2]: \",\\n+ dynamo_backend = _ask_options(\\n+ \"Which dynamo backend would you like to use?\",\\n+ [\\n+ \"eager\",\\n+ \"aot_eager\",\\n+ \"inductor\",\\n+ \"nvfuser\",\\n+ \"aot_nvfuser\",\\n+ \"aot_cudagraphs\",\\n+ \"ofi\",\\n+ \"fx2trt\",\\n+ \"onnxrt\",\\n+ \"ipex\",\\n+ ],\\n _convert_dynamo_backend,\\n- default=DynamoBackend.INDUCTOR,\\n- error_message=\"Please enter 0, 1, 2, 3, 4, 5, 6, 7, 8 or 9.\",\\n+ default=2,\\n )\\n else:\\n dynamo_backend = DynamoBackend.NO\\n-\\n- ec2_instance_query = \"Which EC2 instance type you want to use for your training \"\\n+ ec2_instance_query = \"Which EC2 instance type you want to use for your training?\"\\n if distributed_type != SageMakerDistributedType.NO:\\n- ec2_instance_query += \"(\"\\n- for i, instance_type in enumerate(SAGEMAKER_PARALLEL_EC2_INSTANCES):\\n- ec2_instance_query += f\"[{i}] {instance_type}, \"\\n- ec2_instance_query = ec2_instance_query[:-2] + \")? [0]: \"\\n- ec2_instance_type = _ask_field(ec2_instance_query, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)])\\n+ ec2_instance_type = _ask_options(\\n+ ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]\\n+ )\\n else:\\n ec2_instance_query += \"? [ml.p3.2xlarge]:\"\\n ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default=\"ml.p3.2xlarge\")\\n@@ -205,10 +216,10 @@ def get_sagemaker_input():\\n default=1,\\n )\\n \\n- mixed_precision = _ask_field(\\n- \"Do you wish to use FP16 or BF16 (mixed precision)? [No/FP16/BF16]: \",\\n- lambda x: str(x).lower(),\\n- default=\"no\",\\n+ mixed_precision = _ask_options(\\n+ \"Do you wish to use FP16 or BF16 (mixed precision)?\",\\n+ [\"no\", \"fp16\", \"bf16\"],\\n+ _convert_mixed_precision,\\n )\\n \\n if use_dynamo and mixed_precision == \"no\":\\ndiff --git a/src/accelerate/commands/menu/__init__.py b/src/accelerate/commands/menu/__init__.py\\nnew file mode 100644\\nindex 000000000..ec17fba58\\n--- /dev/null\\n+++ b/src/accelerate/commands/menu/__init__.py\\n@@ -0,0 +1,5 @@\\n+# flake8: noqa\\n+# There\\'s no way to ignore \"F401 \\'...\\' imported but unused\" warnings in this\\n+# module, but to preserve other warnings. 
So, don\\'t check this module at all\\n+\\n+from .selection_menu import BulletMenu\\ndiff --git a/src/accelerate/commands/menu/cursor.py b/src/accelerate/commands/menu/cursor.py\\nnew file mode 100644\\nindex 000000000..c1f0bb7b6\\n--- /dev/null\\n+++ b/src/accelerate/commands/menu/cursor.py\\n@@ -0,0 +1,65 @@\\n+# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+\"\"\"\\n+A utility for showing and hiding the terminal cursor on Windows and Linux, based on https://github.com/bchao1/bullet\\n+\"\"\"\\n+\\n+import os\\n+import sys\\n+from contextlib import contextmanager\\n+\\n+\\n+# Windows only\\n+if os.name == \"nt\":\\n+ import ctypes\\n+ import msvcrt # noqa\\n+\\n+ class CursorInfo(ctypes.Structure):\\n+ # _fields is a specific attr expected by ctypes\\n+ _fields_ = [(\"size\", ctypes.c_int), (\"visible\", ctypes.c_byte)]\\n+\\n+\\n+def hide_cursor():\\n+ if os.name == \"nt\":\\n+ ci = CursorInfo()\\n+ handle = ctypes.windll.kernel32.GetStdHandle(-11)\\n+ ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))\\n+ ci.visible = False\\n+ ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))\\n+ elif os.name == \"posix\":\\n+ sys.stdout.write(\"\\\\033[?25l\")\\n+ sys.stdout.flush()\\n+\\n+\\n+def show_cursor():\\n+ if os.name == \"nt\":\\n+ ci = CursorInfo()\\n+ handle = ctypes.windll.kernel32.GetStdHandle(-11)\\n+ ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))\\n+ ci.visible = True\\n+ ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))\\n+ elif os.name == \"posix\":\\n+ sys.stdout.write(\"\\\\033[?25h\")\\n+ sys.stdout.flush()\\n+\\n+\\n+@contextmanager\\n+def hide():\\n+ \"Context manager to hide the terminal cursor\"\\n+ try:\\n+ hide_cursor()\\n+ yield\\n+ finally:\\n+ show_cursor()\\ndiff --git a/src/accelerate/commands/menu/helpers.py b/src/accelerate/commands/menu/helpers.py\\nnew file mode 100644\\nindex 000000000..2cc2ece3f\\n--- /dev/null\\n+++ b/src/accelerate/commands/menu/helpers.py\\n@@ -0,0 +1,55 @@\\n+# Copyright 2022 The HuggingFace Team and Brian Chao. 
All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+\"\"\"\\n+A variety of helper functions and constants when dealing with terminal menu choices, based on\\n+https://github.com/bchao1/bullet\\n+\"\"\"\\n+\\n+import enum\\n+import shutil\\n+import sys\\n+\\n+\\n+TERMINAL_WIDTH, _ = shutil.get_terminal_size()\\n+\\n+CURSOR_TO_CHAR = {\"UP\": \"A\", \"DOWN\": \"B\", \"RIGHT\": \"C\", \"LEFT\": \"D\"}\\n+\\n+\\n+class Direction(enum.Enum):\\n+ UP = 0\\n+ DOWN = 1\\n+\\n+\\n+def forceWrite(content, end=\"\"):\\n+ sys.stdout.write(content + end)\\n+ sys.stdout.flush()\\n+\\n+\\n+def reset_cursor():\\n+ forceWrite(\"\\\\r\")\\n+\\n+\\n+def move_cursor(num_lines: int, direction: str):\\n+ forceWrite(f\"\\\\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}\")\\n+\\n+\\n+def clear_line():\\n+ forceWrite(\" \" * TERMINAL_WIDTH)\\n+ reset_cursor()\\n+\\n+\\n+def linebreak():\\n+ reset_cursor()\\n+ forceWrite(\"-\" * TERMINAL_WIDTH)\\ndiff --git a/src/accelerate/commands/menu/input.py b/src/accelerate/commands/menu/input.py\\nnew file mode 100644\\nindex 000000000..266f7e7db\\n--- /dev/null\\n+++ b/src/accelerate/commands/menu/input.py\\n@@ -0,0 +1,86 @@\\n+# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+\"\"\"\\n+This file contains utilities for handling input from the user and registering specific keys to specific functions,\\n+based on https://github.com/bchao1/bullet\\n+\"\"\"\\n+\\n+from typing import List\\n+\\n+from .keymap import KEYMAP, get_character\\n+\\n+\\n+def mark(key: str):\\n+ \"\"\"\\n+ Mark the function with the key code so it can be handled in the register\\n+ \"\"\"\\n+\\n+ def decorator(func):\\n+ handle = getattr(func, \"handle_key\", [])\\n+ handle += [key]\\n+ setattr(func, \"handle_key\", handle)\\n+ return func\\n+\\n+ return decorator\\n+\\n+\\n+def mark_multiple(*keys: List[str]):\\n+ \"\"\"\\n+ Mark the function with the key codes so it can be handled in the register\\n+ \"\"\"\\n+\\n+ def decorator(func):\\n+ handle = getattr(func, \"handle_key\", [])\\n+ handle += keys\\n+ setattr(func, \"handle_key\", handle)\\n+ return func\\n+\\n+ return decorator\\n+\\n+\\n+class KeyHandler(type):\\n+ \"\"\"\\n+ Metaclass that adds the key handlers to the class\\n+ \"\"\"\\n+\\n+ def __new__(cls, name, bases, attrs):\\n+ new_cls = super().__new__(cls, name, bases, attrs)\\n+ if not hasattr(new_cls, \"key_handler\"):\\n+ 
setattr(new_cls, \"key_handler\", {})\\n+ setattr(new_cls, \"handle_input\", KeyHandler.handle_input)\\n+\\n+ for value in attrs.values():\\n+ handled_keys = getattr(value, \"handle_key\", [])\\n+ for key in handled_keys:\\n+ new_cls.key_handler[key] = value\\n+ return new_cls\\n+\\n+ @staticmethod\\n+ def handle_input(cls):\\n+ \"Finds and returns the selected character if it exists in the handler\"\\n+ char = get_character()\\n+ if char != KEYMAP[\"undefined\"]:\\n+ char = ord(char)\\n+ handler = cls.key_handler.get(char)\\n+ if handler:\\n+ cls.current_selection = char\\n+ return handler(cls)\\n+ else:\\n+ return None\\n+\\n+\\n+def register(cls):\\n+ \"\"\"Adds KeyHandler metaclass to the class\"\"\"\\n+ return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())\\ndiff --git a/src/accelerate/commands/menu/keymap.py b/src/accelerate/commands/menu/keymap.py\\nnew file mode 100644\\nindex 000000000..7ce6c0637\\n--- /dev/null\\n+++ b/src/accelerate/commands/menu/keymap.py\\n@@ -0,0 +1,81 @@\\n+# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+\"\"\"\\n+Utilities relating to parsing raw characters from the keyboard, based on https://github.com/bchao1/bullet\\n+\"\"\"\\n+\\n+\\n+import string\\n+import sys\\n+import termios\\n+import tty\\n+\\n+\\n+ARROW_KEY_FLAG = 1 << 8\\n+\\n+KEYMAP = {\\n+ \"tab\": ord(\"\\\\t\"),\\n+ \"newline\": ord(\"\\\\r\"),\\n+ \"esc\": 27,\\n+ \"up\": 65 + ARROW_KEY_FLAG,\\n+ \"down\": 66 + ARROW_KEY_FLAG,\\n+ \"right\": 67 + ARROW_KEY_FLAG,\\n+ \"left\": 68 + ARROW_KEY_FLAG,\\n+ \"mod_int\": 91,\\n+ \"undefined\": sys.maxsize,\\n+ \"interrupt\": 3,\\n+}\\n+\\n+KEYMAP[\"arrow_begin\"] = KEYMAP[\"up\"]\\n+KEYMAP[\"arrow_end\"] = KEYMAP[\"left\"]\\n+\\n+for i in range(10):\\n+ KEYMAP[str(i)] = ord(str(i))\\n+\\n+\\n+def get_raw_chars():\\n+ \"Gets raw characters from inputs\"\\n+ fd = sys.stdin.fileno()\\n+ old_settings = termios.tcgetattr(fd)\\n+ try:\\n+ tty.setraw(fd)\\n+ ch = sys.stdin.read(1)\\n+ finally:\\n+ termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\\n+ return ch\\n+\\n+\\n+def get_character():\\n+ \"Gets a character from the keyboard and returns the key code\"\\n+ char = get_raw_chars()\\n+ if ord(char) in [KEYMAP[\"interrupt\"], KEYMAP[\"newline\"]]:\\n+ return char\\n+\\n+ elif ord(char) == KEYMAP[\"esc\"]:\\n+ combo = get_raw_chars()\\n+ if ord(combo) == KEYMAP[\"mod_int\"]:\\n+ key = get_raw_chars()\\n+ if ord(key) >= KEYMAP[\"arrow_begin\"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP[\"arrow_end\"] - ARROW_KEY_FLAG:\\n+ return chr(ord(key) + ARROW_KEY_FLAG)\\n+ else:\\n+ return KEYMAP[\"undefined\"]\\n+ else:\\n+ return get_raw_chars()\\n+\\n+ else:\\n+ if char in string.printable:\\n+ return char\\n+ else:\\n+ return KEYMAP[\"undefined\"]\\ndiff --git a/src/accelerate/commands/menu/selection_menu.py b/src/accelerate/commands/menu/selection_menu.py\\nnew file mode 100644\\nindex 000000000..d4af34c8f\\n--- /dev/null\\n+++ 
b/src/accelerate/commands/menu/selection_menu.py\\n@@ -0,0 +1,112 @@\\n+# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+\"\"\"\\n+Main driver for the selection menu, based on https://github.com/bchao1/bullet\\n+\"\"\"\\n+from . import cursor, input\\n+from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor\\n+from .keymap import KEYMAP\\n+\\n+\\n+@input.register\\n+class BulletMenu:\\n+ \"\"\"\\n+ A CLI menu to select a choice from a list of choices using the keyboard.\\n+ \"\"\"\\n+\\n+ def __init__(self, prompt: str = None, choices: list = []):\\n+ self.position = 0\\n+ self.choices = choices\\n+ self.prompt = prompt\\n+\\n+ def print_choice(self, index: int):\\n+ \"Prints the choice at the given index\"\\n+ if index == self.position:\\n+ forceWrite(f\" ➔ {self.choices[index]}\")\\n+ else:\\n+ forceWrite(f\" {self.choices[index]}\")\\n+ reset_cursor()\\n+\\n+ def move_direction(self, direction: Direction, num_spaces: int = 1):\\n+ \"Should not be directly called, used to move a direction of either up or down\"\\n+ old_position = self.position\\n+ if direction == Direction.DOWN:\\n+ if self.position + 1 >= len(self.choices):\\n+ return\\n+ self.position += num_spaces\\n+ else:\\n+ if self.position - 1 < 0:\\n+ return\\n+ self.position -= num_spaces\\n+ clear_line()\\n+ self.print_choice(old_position)\\n+ move_cursor(num_spaces, direction.name)\\n+ self.print_choice(self.position)\\n+\\n+ @input.mark(KEYMAP[\"up\"])\\n+ def move_up(self):\\n+ self.move_direction(Direction.UP)\\n+\\n+ @input.mark(KEYMAP[\"down\"])\\n+ def move_down(self):\\n+ self.move_direction(Direction.DOWN)\\n+\\n+ @input.mark(KEYMAP[\"newline\"])\\n+ def select(self):\\n+ move_cursor(len(self.choices) - self.position, \"DOWN\")\\n+ return self.position\\n+\\n+ @input.mark(KEYMAP[\"interrupt\"])\\n+ def interrupt(self):\\n+ move_cursor(len(self.choices) - self.position, \"DOWN\")\\n+ raise KeyboardInterrupt\\n+\\n+ @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])\\n+ def select_row(self):\\n+ index = int(chr(self.current_selection))\\n+ movement = index - self.position\\n+ if index == self.position:\\n+ return\\n+ if index < len(self.choices):\\n+ if self.position > index:\\n+ self.move_direction(Direction.UP, -movement)\\n+ elif self.position < index:\\n+ self.move_direction(Direction.DOWN, movement)\\n+ else:\\n+ return\\n+ else:\\n+ return\\n+\\n+ def run(self, default_choice: int = 0):\\n+ \"Start the menu and return the selected choice\"\\n+ if self.prompt:\\n+ linebreak()\\n+ forceWrite(self.prompt, \"\\\\n\")\\n+ forceWrite(\"Please select a choice using the arrow or number keys, and selecting with enter\", \"\\\\n\")\\n+ self.position = default_choice\\n+ for i in range(len(self.choices)):\\n+ self.print_choice(i)\\n+ forceWrite(\"\\\\n\")\\n+ move_cursor(len(self.choices) - self.position, \"UP\")\\n+ with cursor.hide():\\n+ while True:\\n+ choice = 
self.handle_input()\\n+ if choice is not None:\\n+ reset_cursor()\\n+ for _ in range(len(self.choices) + 1):\\n+ move_cursor(1, \"UP\")\\n+ clear_line()\\n+ forceWrite(f\" * {self.choices[choice]}\", \"\\\\n\")\\n+ return choice\\n',\n", - " 'code_comments': [{'body': \"Let's name this submodule `menu` since it's about that?\",\n", - " 'diff_hunk': '@@ -0,0 +1,5 @@\\n+# flake8: noqa',\n", - " 'from_author': False},\n", - " {'body': 'You know how I feel about _ prefixing names.',\n", - " 'diff_hunk': '@@ -0,0 +1,64 @@\\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+\"\"\"\\n+A utility for showing and hiding the terminal cursor on Windows and Linux, based on https://github.com/bchao1/bullet\\n+\"\"\"\\n+\\n+import os\\n+import sys\\n+from contextlib import contextmanager\\n+\\n+\\n+# Windows only\\n+if os.name == \"nt\":\\n+ import ctypes\\n+ import msvcrt # noqa\\n+\\n+ class _CursorInfo(ctypes.Structure):\\n+ _fields_ = [(\"size\", ctypes.c_int), (\"visible\", ctypes.c_byte)]',\n", - " 'from_author': False},\n", - " {'body': 'If this is heavily inspired by another lib, it needs to be reflected in the copyright probably.',\n", - " 'diff_hunk': '@@ -0,0 +1,64 @@\\n+# Copyright 2022 The HuggingFace Team. All rights reserved.',\n", - " 'from_author': False},\n", - " {'body': '`_fields_` is very specific to `ctypes.Structure`, but will rename `CursorInfo`',\n", - " 'diff_hunk': '@@ -0,0 +1,64 @@\\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+\"\"\"\\n+A utility for showing and hiding the terminal cursor on Windows and Linux, based on https://github.com/bchao1/bullet\\n+\"\"\"\\n+\\n+import os\\n+import sys\\n+from contextlib import contextmanager\\n+\\n+\\n+# Windows only\\n+if os.name == \"nt\":\\n+ import ctypes\\n+ import msvcrt # noqa\\n+\\n+ class _CursorInfo(ctypes.Structure):\\n+ _fields_ = [(\"size\", ctypes.c_int), (\"visible\", ctypes.c_byte)]',\n", - " 'from_author': True},\n", - " {'body': 'This and every function in this module pretty please!',\n", - " 'diff_hunk': '@@ -0,0 +1,64 @@\\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+\"\"\"\\n+A utility for showing and hiding the terminal cursor on Windows and Linux, based on https://github.com/bchao1/bullet\\n+\"\"\"\\n+\\n+import os\\n+import sys\\n+from contextlib import contextmanager\\n+\\n+\\n+# Windows only\\n+if os.name == \"nt\":\\n+ import ctypes\\n+ import msvcrt # noqa\\n+\\n+ class _CursorInfo(ctypes.Structure):\\n+ _fields_ = [(\"size\", ctypes.c_int), (\"visible\", ctypes.c_byte)]',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\n lambda x: int(x)+1,\\r\\n```',\n", - " 'diff_hunk': '@@ -227,12 +223,10 @@ def get_cluster_input():\\n if use_fsdp:\\n distributed_type = DistributedType.FSDP\\n if distributed_type == DistributedType.FSDP:\\n- sharding_strategy_query = \"What should be your sharding strategy (\"\\n- for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):\\n- sharding_strategy_query += f\"[{i+1}] {strategy}, \"\\n- sharding_strategy_query = sharding_strategy_query[:-2] + \")? [1]: \"\\n- fsdp_config[\"fsdp_sharding_strategy\"] = _ask_field(\\n+ sharding_strategy_query = \"What should be your sharding strategy?\"\\n+ fsdp_config[\"fsdp_sharding_strategy\"] = _ask_options(\\n sharding_strategy_query,\\n+ FSDP_SHARDING_STRATEGY,\\n lambda x: int(x),',\n", - " 'from_author': False},\n", - " {'body': \"Because it's 1 based numbering\",\n", - " 'diff_hunk': '@@ -227,12 +223,10 @@ def get_cluster_input():\\n if use_fsdp:\\n distributed_type = DistributedType.FSDP\\n if distributed_type == DistributedType.FSDP:\\n- sharding_strategy_query = \"What should be your sharding strategy (\"\\n- for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):\\n- sharding_strategy_query += f\"[{i+1}] {strategy}, \"\\n- sharding_strategy_query = sharding_strategy_query[:-2] + \")? [1]: \"\\n- fsdp_config[\"fsdp_sharding_strategy\"] = _ask_field(\\n+ sharding_strategy_query = \"What should be your sharding strategy?\"\\n+ fsdp_config[\"fsdp_sharding_strategy\"] = _ask_options(\\n sharding_strategy_query,\\n+ FSDP_SHARDING_STRATEGY,\\n lambda x: int(x),',\n", - " 'from_author': False},\n", - " {'body': 'Missing the conversion function here.',\n", - " 'diff_hunk': '@@ -460,10 +463,9 @@ def get_cluster_input():\\n if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config:\\n mixed_precision = \"no\"\\n else:\\n- mixed_precision = _ask_field(\\n- \"Do you wish to use FP16 or BF16 (mixed precision)? [NO/fp16/bf16]: \",\\n- lambda x: str(x).lower(),\\n- default=\"no\",\\n+ mixed_precision = _ask_options(\\n+ \"Do you wish to use FP16 or BF16 (mixed precision)?\",\\n+ [\"no\", \"fp16\", \"bf16\"],',\n", - " 'from_author': False},\n", - " {'body': 'Thanks! 
Fixed here and for sagemaker',\n", - " 'diff_hunk': '@@ -460,10 +463,9 @@ def get_cluster_input():\\n if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config:\\n mixed_precision = \"no\"\\n else:\\n- mixed_precision = _ask_field(\\n- \"Do you wish to use FP16 or BF16 (mixed precision)? [NO/fp16/bf16]: \",\\n- lambda x: str(x).lower(),\\n- default=\"no\",\\n+ mixed_precision = _ask_options(\\n+ \"Do you wish to use FP16 or BF16 (mixed precision)?\",\\n+ [\"no\", \"fp16\", \"bf16\"],',\n", - " 'from_author': True},\n", - " {'body': 'This line (297) should have been removed',\n", - " 'diff_hunk': '@@ -284,23 +293,18 @@ def get_cluster_input():\\n lambda x: int(x),\\n default=1e8,\\n )\\n- fsdp_backward_prefetch_query = \"What should be your FSDP\\'s backward prefetch policy (\"\\n- for i, backward_prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):\\n- fsdp_backward_prefetch_query += f\"[{i}] {backward_prefetch_policy}, \"\\n+ fsdp_backward_prefetch_query = \"What should be your FSDP\\'s backward prefetch policy?\"\\n fsdp_backward_prefetch_query = fsdp_backward_prefetch_query[:-2] + \")? [0]: \"',\n", - " 'from_author': False}],\n", - " 'context': [{'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_830). All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False},\n", - " {'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_830). All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False},\n", - " {'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_830). All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False},\n", - " {'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_830). All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False},\n", - " {'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_830). All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False},\n", - " {'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_830). All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/830'},\n", - " 1114981317: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex 63ea4a71b..ad22c8ee4 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -36,6 +36,7 @@\\n DeepSpeedPlugin,\\n DistributedDataParallelKwargs,\\n DistributedType,\\n+ DynamoBackend,\\n FullyShardedDataParallelPlugin,\\n GradScalerKwargs,\\n InitProcessGroupKwargs,\\n@@ -163,6 +164,8 @@ class Accelerator:\\n kwargs_handlers (`List[KwargHandler]`, *optional*)\\n A list of `KwargHandler` to customize how the objects related to distributed training or mixed precision\\n are created. 
See [kwargs](kwargs) for more information.\\n+ dynamo_backend (`str` or `DynamoBackend`, *optional*, defaults to `\"no\"`):\\n+ Set to one of the possible dynamo backends to optimize your training with torch dynamo.\\n \\n **Available attributes:**\\n \\n@@ -198,6 +201,7 @@ def __init__(\\n even_batches: bool = True,\\n step_scheduler_with_optimizer: bool = True,\\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\\n+ dynamo_backend: Union[DynamoBackend, str] = None,\\n ):\\n self.logging_dir = logging_dir\\n trackers = filter_trackers(log_with, self.logging_dir)\\n@@ -219,6 +223,9 @@ def __init__(\\n )\\n mixed_precision = \"fp16\"\\n \\n+ if dynamo_backend is not None:\\n+ dynamo_backend = DynamoBackend(dynamo_backend.upper())\\n+\\n if deepspeed_plugin is None: # init from env variables\\n deepspeed_plugin = DeepSpeedPlugin() if os.environ.get(\"USE_DEEPSPEED\", \"false\") == \"true\" else None\\n else:\\n@@ -285,6 +292,7 @@ def __init__(\\n self.state = AcceleratorState(\\n mixed_precision=mixed_precision,\\n cpu=cpu,\\n+ dynamo_backend=dynamo_backend,\\n deepspeed_plugin=deepspeed_plugin,\\n fsdp_plugin=fsdp_plugin,\\n megatron_lm_plugin=megatron_lm_plugin,\\n@@ -793,6 +801,10 @@ def prepare_model(self, model: torch.nn.Module, device_placement=None):\\n self._models.append(model)\\n if device_placement:\\n model = model.to(self.device)\\n+ if self.state.dynamo_backend != DynamoBackend.NO:\\n+ import torch._dynamo as dynamo\\n+\\n+ model = dynamo.optimize(self.state.dynamo_backend.value.lower())(model)\\n if self.distributed_type == DistributedType.MULTI_GPU:\\n if any(p.requires_grad for p in model.parameters()):\\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\\ndiff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\\nindex aa61a6857..5f032debc 100644\\n--- a/src/accelerate/commands/config/cluster.py\\n+++ b/src/accelerate/commands/config/cluster.py\\n@@ -16,7 +16,13 @@\\n \\n import os\\n \\n-from ...utils import ComputeEnvironment, DistributedType, is_deepspeed_available, is_transformers_available\\n+from ...utils import (\\n+ ComputeEnvironment,\\n+ DistributedType,\\n+ DynamoBackend,\\n+ is_deepspeed_available,\\n+ is_transformers_available,\\n+)\\n from ...utils.constants import (\\n DEEPSPEED_MULTINODE_LAUNCHERS,\\n FSDP_AUTO_WRAP_POLICY,\\n@@ -25,7 +31,7 @@\\n FSDP_STATE_DICT_TYPE,\\n )\\n from .config_args import ClusterConfig\\n-from .config_utils import _ask_field, _convert_distributed_mode, _convert_yes_no_to_bool\\n+from .config_utils import _ask_field, _convert_distributed_mode, _convert_dynamo_backend, _convert_yes_no_to_bool\\n \\n \\n def get_cluster_input():\\n@@ -89,6 +95,22 @@ def get_cluster_input():\\n else:\\n use_cpu = False\\n \\n+ use_dynamo = _ask_field(\\n+ \"Do you wish to optimize your script with torch dynamo?[yes/NO]:\",\\n+ _convert_yes_no_to_bool,\\n+ default=False,\\n+ error_message=\"Please enter yes or no.\",\\n+ )\\n+ if use_dynamo:\\n+ dynamo_backend = _ask_field(\\n+ \"Which dynamo backend would you like to use? 
([0] eager, [1] aot_eager, [2] inductor, [3] nvfuser, [5] aot_nvfuser, [6] aot_cudagraphs, [7] ofi, [8] onnxrt, [9] ipex) [2]: \",\\n+ _convert_dynamo_backend,\\n+ default=DynamoBackend.INDUCTOR,\\n+ error_message=\"Please enter 0, 1, 2, 3, 4, 5, 6, 7, 8 or 9.\",\\n+ )\\n+ else:\\n+ dynamo_backend = DynamoBackend.NO\\n+\\n deepspeed_config = {}\\n if distributed_type in [DistributedType.MULTI_GPU, DistributedType.NO]:\\n use_deepspeed = _ask_field(\\n@@ -446,6 +468,11 @@ def get_cluster_input():\\n else:\\n mixed_precision = \"no\"\\n \\n+ if use_dynamo and mixed_precision == \"no\" and not use_cpu:\\n+ print(\\n+ \"Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.\"\\n+ )\\n+\\n downcast_bf16 = \"no\"\\n if distributed_type == DistributedType.TPU and mixed_precision == \"bf16\":\\n downcast_bf16 = _ask_field(\\n@@ -474,4 +501,5 @@ def get_cluster_input():\\n tpu_zone=tpu_zone,\\n commands=commands,\\n command_file=command_file,\\n+ dynamo_backend=dynamo_backend,\\n )\\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\\nindex a5a841900..ba492802e 100644\\n--- a/src/accelerate/commands/config/config_args.py\\n+++ b/src/accelerate/commands/config/config_args.py\\n@@ -22,7 +22,7 @@\\n \\n import yaml\\n \\n-from ...utils import ComputeEnvironment, DistributedType, SageMakerDistributedType\\n+from ...utils import ComputeEnvironment, DistributedType, DynamoBackend, SageMakerDistributedType\\n from ...utils.constants import SAGEMAKER_PYTHON_VERSION, SAGEMAKER_PYTORCH_VERSION, SAGEMAKER_TRANSFORMERS_VERSION\\n \\n \\n@@ -70,6 +70,7 @@ class BaseConfig:\\n distributed_type: Union[DistributedType, SageMakerDistributedType]\\n mixed_precision: str\\n use_cpu: bool\\n+ dynamo_backend: DynamoBackend\\n \\n def to_dict(self):\\n result = self.__dict__\\n@@ -92,6 +93,8 @@ def from_json_file(cls, json_file=None):\\n del config_dict[\"fp16\"]\\n if \"use_cpu\" not in config_dict:\\n config_dict[\"use_cpu\"] = False\\n+ if \"dynamo_backend\" not in config_dict:\\n+ config_dict[\"dynamo_backend\"] = DynamoBackend.NO\\n return cls(**config_dict)\\n \\n def to_json_file(self, json_file):\\n@@ -113,6 +116,8 @@ def from_yaml_file(cls, yaml_file=None):\\n del config_dict[\"fp16\"]\\n if \"use_cpu\" not in config_dict:\\n config_dict[\"use_cpu\"] = False\\n+ if \"dynamo_backend\" not in config_dict:\\n+ config_dict[\"dynamo_backend\"] = DynamoBackend.NO\\n \\n return cls(**config_dict)\\n \\n@@ -128,6 +133,8 @@ def __post_init__(self):\\n self.distributed_type = SageMakerDistributedType(self.distributed_type)\\n else:\\n self.distributed_type = DistributedType(self.distributed_type)\\n+ if isinstance(self.dynamo_backend, str):\\n+ self.dynamo_backend = DynamoBackend(self.dynamo_backend.upper())\\n \\n \\n @dataclass\\ndiff --git a/src/accelerate/commands/config/config_utils.py b/src/accelerate/commands/config/config_utils.py\\nindex 9dd1f4c99..8974cddc9 100644\\n--- a/src/accelerate/commands/config/config_utils.py\\n+++ b/src/accelerate/commands/config/config_utils.py\\n@@ -14,7 +14,21 @@\\n # See the License for the specific language governing permissions and\\n # limitations under the License.\\n \\n-from ...utils.dataclasses import ComputeEnvironment, DistributedType, SageMakerDistributedType\\n+from ...utils.dataclasses import ComputeEnvironment, DistributedType, DynamoBackend, SageMakerDistributedType\\n+\\n+\\n+DYNAMO_BACKENDS = [\\n+ \"EAGER\",\\n+ 
\"AOT_EAGER\",\\n+ \"INDUCTOR\",\\n+ \"NVFUSER\",\\n+ \"AOT_NVFUSER\",\\n+ \"AOT_CUDAGRAPHS\",\\n+ \"OFI\",\\n+ \"FX2TRT\",\\n+ \"ONNXRT\",\\n+ \"IPEX\",\\n+]\\n \\n \\n def _ask_field(input_text, convert_value=None, default=None, error_message=None):\\n@@ -40,6 +54,11 @@ def _convert_distributed_mode(value):\\n return DistributedType([\"NO\", \"MULTI_CPU\", \"MULTI_GPU\", \"TPU\", \"MPS\"][value])\\n \\n \\n+def _convert_dynamo_backend(value):\\n+ value = int(value)\\n+ return DynamoBackend(DYNAMO_BACKENDS[value + 1])\\n+\\n+\\n def _convert_sagemaker_distributed_mode(value):\\n value = int(value)\\n return SageMakerDistributedType([\"NO\", \"DATA_PARALLEL\", \"MODEL_PARALLEL\"][value])\\ndiff --git a/src/accelerate/commands/config/sagemaker.py b/src/accelerate/commands/config/sagemaker.py\\nindex b3a45c9e4..71de4fea9 100644\\n--- a/src/accelerate/commands/config/sagemaker.py\\n+++ b/src/accelerate/commands/config/sagemaker.py\\n@@ -17,10 +17,15 @@\\n import os\\n \\n from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES\\n-from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType\\n+from ...utils.dataclasses import ComputeEnvironment, DynamoBackend, SageMakerDistributedType\\n from ...utils.imports import is_boto3_available\\n from .config_args import SageMakerConfig\\n-from .config_utils import _ask_field, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool\\n+from .config_utils import (\\n+ _ask_field,\\n+ _convert_dynamo_backend,\\n+ _convert_sagemaker_distributed_mode,\\n+ _convert_yes_no_to_bool,\\n+)\\n \\n \\n if is_boto3_available():\\n@@ -162,6 +167,22 @@ def get_sagemaker_input():\\n error_message=\"Please enter 0 or 1\",\\n )\\n \\n+ use_dynamo = _ask_field(\\n+ \"Do you wish to optimize your script with torch dynamo?[yes/NO]:\",\\n+ _convert_yes_no_to_bool,\\n+ default=False,\\n+ error_message=\"Please enter yes or no.\",\\n+ )\\n+ if use_dynamo:\\n+ dynamo_backend = _ask_field(\\n+ \"Which dynamo backend would you like to use? ([0] eager, [1] aot_eager, [2] inductor, [3] nvfuser, [5] aot_nvfuser, [6] aot_cudagraphs, [7] ofi, [8] onnxrt, [9] ipex) [2]: \",\\n+ _convert_dynamo_backend,\\n+ default=DynamoBackend.INDUCTOR,\\n+ error_message=\"Please enter 0, 1, 2, 3, 4, 5, 6, 7, 8 or 9.\",\\n+ )\\n+ else:\\n+ dynamo_backend = DynamoBackend.NO\\n+\\n ec2_instance_query = \"Which EC2 instance type you want to use for your training \"\\n if distributed_type != SageMakerDistributedType.NO:\\n ec2_instance_query += \"(\"\\n@@ -186,15 +207,21 @@ def get_sagemaker_input():\\n \\n mixed_precision = _ask_field(\\n \"Do you wish to use FP16 or BF16 (mixed precision)? [No/FP16/BF16]: \",\\n- lambda x: str(x),\\n- default=\"No\",\\n+ lambda x: str(x).lower(),\\n+ default=\"no\",\\n )\\n \\n+ if use_dynamo and mixed_precision == \"no\":\\n+ print(\\n+ \"Torch dynamo used without mixed precision requires TF32 to be efficient. 
Accelerate will enable it by default when launching your scripts.\"\\n+ )\\n+\\n return SageMakerConfig(\\n image_uri=docker_image,\\n compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,\\n distributed_type=distributed_type,\\n use_cpu=False,\\n+ dynamo_backend=dynamo_backend,\\n ec2_instance_type=ec2_instance_type,\\n profile=aws_profile,\\n region=aws_region,\\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex 46d95c713..d96ab2f45 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -30,10 +30,12 @@\\n import psutil\\n from accelerate.commands.config import default_config_file, load_config_from_file\\n from accelerate.commands.config.config_args import SageMakerConfig\\n+from accelerate.commands.config.config_utils import DYNAMO_BACKENDS\\n from accelerate.state import get_int_from_env\\n from accelerate.utils import (\\n ComputeEnvironment,\\n DistributedType,\\n+ DynamoBackend,\\n PrecisionType,\\n PrepareForLaunch,\\n _filter_args,\\n@@ -171,6 +173,13 @@ def launch_command_parser(subparsers=None):\\n resource_args = parser.add_argument_group(\\n \"Resource Selection Arguments\", \"Arguments for fine-tuning how available hardware should be used.\"\\n )\\n+ resource_args.add_argument(\\n+ \"--dynamo_backend\",\\n+ type=str,\\n+ choices=[\"no\"] + [b.lower() for b in DYNAMO_BACKENDS],\\n+ help=\"Choose a backend to optimize your training with dynamo, see more at \"\\n+ \"https://github.com/pytorch/torchdynamo.\",\\n+ )\\n resource_args.add_argument(\\n \"--mixed_precision\",\\n type=str,\\n@@ -546,6 +555,13 @@ def simple_launcher(args):\\n mixed_precision = \"fp16\"\\n \\n current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\\n+\\n+ try:\\n+ dynamo_backend = DynamoBackend(args.dynamo_backend.upper())\\n+ except ValueError:\\n+ raise ValueError(f\"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DYNAMO_BACKENDS}.\")\\n+ current_env[\"DYNAMO_BACKEND\"] = dynamo_backend.value\\n+\\n current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\\n \\n process = subprocess.Popen(cmd, env=current_env)\\n@@ -598,6 +614,13 @@ def multi_gpu_launcher(args):\\n mixed_precision = \"fp16\"\\n \\n current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\\n+\\n+ try:\\n+ dynamo_backend = DynamoBackend(args.dynamo_backend.upper())\\n+ except ValueError:\\n+ raise ValueError(f\"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DYNAMO_BACKENDS}.\")\\n+ current_env[\"DYNAMO_BACKEND\"] = dynamo_backend.value\\n+\\n if args.use_fsdp:\\n current_env[\"USE_FSDP\"] = \"true\"\\n current_env[\"FSDP_SHARDING_STRATEGY\"] = str(args.fsdp_sharding_strategy)\\n@@ -893,10 +916,16 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\\n warnings.warn(\\'--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.\\', FutureWarning)\\n mixed_precision = \"fp16\"\\n \\n+ try:\\n+ dynamo_backend = DynamoBackend(args.dynamo_backend.upper())\\n+ except ValueError:\\n+ raise ValueError(f\"Unknown dynamo backend: {args.dynamo_backend.upper()}. 
Choose between {DYNAMO_BACKENDS}.\")\\n+\\n # Environment variables to be set for use during training job\\n environment = {\\n \"USE_SAGEMAKER\": \"true\",\\n \"MIXED_PRECISION\": str(mixed_precision),\\n+ \"DYNAMO_BACKEND\": dynamo_backend.value,\\n \"SAGEMAKER_DISTRIBUTED_TYPE\": sagemaker_config.distributed_type.value,\\n }\\n # configure distribution set up\\n@@ -1017,6 +1046,9 @@ def launch_command(args):\\n args.mixed_precision = \"fp16\"\\n else:\\n args.mixed_precision = defaults.mixed_precision\\n+ if args.dynamo_backend is None:\\n+ warned.append(\"\\\\t`--dynamo_backend` was set to a value of `\\'no\\'`\")\\n+ args.dynamo_backend = \"no\"\\n else:\\n if args.num_processes is None:\\n args.num_processes = torch.cuda.device_count() if args.multi_gpu else 1\\n@@ -1029,6 +1061,9 @@ def launch_command(args):\\n args.mixed_precision = \"no\"\\n if not hasattr(args, \"use_cpu\"):\\n args.use_cpu = args.cpu\\n+ if args.dynamo_backend is None:\\n+ warned.append(\"\\\\t`--dynamo_backend` was set to a value of `\\'no\\'`\")\\n+ args.dynamo_backend = \"no\"\\n \\n if args.num_cpu_threads_per_process is None:\\n args.num_cpu_threads_per_process = 1\\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\\nindex 00ba3ab67..18df61bbc 100644\\n--- a/src/accelerate/state.py\\n+++ b/src/accelerate/state.py\\n@@ -19,6 +19,7 @@\\n \\n from .utils import (\\n DistributedType,\\n+ DynamoBackend,\\n get_ccl_version,\\n get_int_from_env,\\n is_ccl_available,\\n@@ -57,6 +58,7 @@ def __init__(\\n self,\\n mixed_precision: str = None,\\n cpu: bool = False,\\n+ dynamo_backend=None,\\n deepspeed_plugin=None,\\n fsdp_plugin=None,\\n megatron_lm_plugin=None,\\n@@ -74,6 +76,10 @@ def __init__(\\n mixed_precision = (\\n parse_choice_from_env(\"MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision.lower()\\n )\\n+ dynamo_backend = (\\n+ parse_choice_from_env(\"DYNAMO_BACKEND\", \"no\") if dynamo_backend is None else dynamo_backend\\n+ )\\n+ self.dynamo_backend = DynamoBackend(dynamo_backend.upper())\\n if not _from_accelerator:\\n raise ValueError(\\n \"Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` \"\\n@@ -230,6 +236,9 @@ def __init__(\\n else:\\n self.device = torch.device(\"cuda\")\\n self.mixed_precision = mixed_precision\\n+\\n+ if self.dynamo_backend != DynamoBackend.NO and self.mixed_precision == \"no\" and self.device.type == \"cuda\":\\n+ torch.backends.cuda.matmul.allow_tf32 = True\\n self.initialized = True\\n \\n def __repr__(self):\\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\\nindex a017a2ad5..3ad09ecab 100644\\n--- a/src/accelerate/utils/__init__.py\\n+++ b/src/accelerate/utils/__init__.py\\n@@ -8,6 +8,7 @@\\n DeepSpeedPlugin,\\n DistributedDataParallelKwargs,\\n DistributedType,\\n+ DynamoBackend,\\n FullyShardedDataParallelPlugin,\\n GradScalerKwargs,\\n InitProcessGroupKwargs,\\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\\nindex 7dd8798e9..e51576865 100644\\n--- a/src/accelerate/utils/dataclasses.py\\n+++ b/src/accelerate/utils/dataclasses.py\\n@@ -164,6 +164,50 @@ class ComputeEnvironment(str, enum.Enum):\\n AMAZON_SAGEMAKER = \"AMAZON_SAGEMAKER\"\\n \\n \\n+class DynamoBackend(str, enum.Enum):\\n+ \"\"\"\\n+ Represents a dynamo backend (see https://github.com/pytorch/torchdynamo).\\n+\\n+ Values:\\n+\\n+ - **NO** -- Do not use torch dynamo.\\n+ - **EAGER** -- Uses PyTorch to run the extracted GraphModule. 
This is quite useful in debugging TorchDynamo\\n+ issues.\\n+ - **AOT_EAGER** -- Uses AotAutograd with no compiler, i.e, just using PyTorch eager for the AotAutograd\\'s\\n+ extracted forward and backward graphs. This is useful for debugging, and unlikely to give speedups.\\n+ - **INDUCTOR** -- Uses TorchInductor backend with AotAutograd and cudagraphs by leveraging codegened Triton\\n+ kernels. [Read\\n+ more](https://dev-discuss.pytorch.org/t/torchinductor-a-pytorch-native-compiler-with-define-by-run-ir-and-symbolic-shapes/747)\\n+ - **NVFUSER** -- nvFuser with TorchScript. [Read\\n+ more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593)\\n+ - **AOT_NVFUSER** -- nvFuser with AotAutograd. [Read\\n+ more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593)\\n+ - **AOT_CUDAGRAPHS** -- cudagraphs with AotAutograd. [Read\\n+ more](https://github.com/pytorch/torchdynamo/pull/757)\\n+ - **OFI** -- Uses Torchscript optimize_for_inference. Inference only. [Read\\n+ more](https://pytorch.org/docs/stable/generated/torch.jit.optimize_for_inference.html)\\n+ - **FX2TRT** -- Uses Nvidia TensorRT for inference optimizations. Inference only. [Read\\n+ more](https://github.com/pytorch/TensorRT/blob/master/docsrc/tutorials/getting_started_with_fx_path.rst)\\n+ - **ONNXRT** -- Uses ONNXRT for inference on CPU/GPU. Inference only. [Read more](https://onnxruntime.ai/)\\n+ - **IPEX** -- Uses IPEX for inference on CPU. Inference only. [Read\\n+ more](https://github.com/intel/intel-extension-for-pytorch).\\n+\\n+ \"\"\"\\n+\\n+ # Subclassing str as well as Enum allows the `SageMakerDistributedType` to be JSON-serializable out of the box.\\n+ NO = \"NO\"\\n+ EAGER = \"EAGER\"\\n+ AOT_EAGER = \"AOT_EAGER\"\\n+ INDUCTOR = \"INDUCTOR\"\\n+ NVFUSER = \"NVFUSER\"\\n+ AOT_NVFUSER = \"AOT_NVFUSER\"\\n+ AOT_CUDAGRAPHS = \"AOT_CUDAGRAPHS\"\\n+ OFI = \"OFI\"\\n+ FX2TRT = \"FX2TRT\"\\n+ ONNXRT = \"ONNXRT\"\\n+ IPEX = \"IPEX\"\\n+\\n+\\n class EnumWithContains(enum.EnumMeta):\\n \"A metaclass that adds the ability to check if `self` contains an item with the `in` operator\"\\n \\ndiff --git a/src/accelerate/utils/other.py b/src/accelerate/utils/other.py\\nindex ff360038d..f1af373b6 100644\\n--- a/src/accelerate/utils/other.py\\n+++ b/src/accelerate/utils/other.py\\n@@ -115,7 +115,7 @@ def get_pretty_name(obj):\\n return str(obj)\\n \\n \\n-def write_basic_config(mixed_precision=\"no\", save_location: str = default_json_config_file):\\n+def write_basic_config(mixed_precision=\"no\", save_location: str = default_json_config_file, dynamo_backend=\"no\"):\\n \"\"\"\\n Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also\\n set CPU if it is a CPU-only machine.\\n@@ -138,7 +138,11 @@ def write_basic_config(mixed_precision=\"no\", save_location: str = default_json_c\\n mixed_precision = mixed_precision.lower()\\n if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\\n raise ValueError(f\"`mixed_precision` should be one of \\'no\\', \\'fp16\\', or \\'bf16\\'. 
Received {mixed_precision}\")\\n- config = {\"compute_environment\": \"LOCAL_MACHINE\", \"mixed_precision\": mixed_precision}\\n+ config = {\\n+ \"compute_environment\": \"LOCAL_MACHINE\",\\n+ \"mixed_precision\": mixed_precision,\\n+ \"dynamo_backend\": dynamo_backend,\\n+ }\\n if torch.cuda.is_available():\\n num_gpus = torch.cuda.device_count()\\n config[\"num_processes\"] = num_gpus\\n',\n", - " 'code_comments': [{'body': '```suggestion\\n Set to one of the possible dynamo backends to optimize your training with torch dynamo.\\n```\\n',\n", - " 'diff_hunk': '@@ -163,6 +164,8 @@ class Accelerator:\\n kwargs_handlers (`List[KwargHandler]`, *optional*)\\n A list of `KwargHandler` to customize how the objects related to distributed training or mixed precision\\n are created. See [kwargs](kwargs) for more information.\\n+ dynamo_backend (`str` or `DynamoBackend`, *optional*, defaults to `\"no\"`):\\n+ Set to one of the possible dynamo backends to optimizer your training with torch dynamo.',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\n if self.dynamo_backend != DynamoBackend.NO and self.mixed_precision == \"no\" and self.device.type == \"cuda\":\\n```\\n',\n", - " 'diff_hunk': '@@ -230,6 +236,9 @@ def __init__(\\n else:\\n self.device = torch.device(\"cuda\")\\n self.mixed_precision = mixed_precision\\n+\\n+ if self.dynamo_backend != DynamoBackend.NO and self.mixed_precision != \"no\" and self.device.type == \"cuda\":',\n", - " 'from_author': False}],\n", - " 'context': [{'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_829). All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False},\n", - " {'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_829). All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False},\n", - " {'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_829). All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False},\n", - " {'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_829). 
All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/829'},\n", - " 1114610182: {'diff': 'diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex 46d95c713..9238fbca9 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -56,9 +56,6 @@\\n logging.basicConfig(format=FORMAT, datefmt=\"[%X]\", handlers=[RichHandler()])\\n \\n \\n-if is_torch_version(\">=\", \"1.9.0\"):\\n- import torch.distributed.run as distrib_run\\n-\\n logger = logging.getLogger(__name__)\\n \\n options_to_group = {\\n@@ -555,6 +552,8 @@ def simple_launcher(args):\\n \\n \\n def multi_gpu_launcher(args):\\n+ if is_torch_version(\">=\", \"1.9.0\"):\\n+ import torch.distributed.run as distrib_run\\n num_processes = getattr(args, \"num_processes\")\\n num_machines = getattr(args, \"num_machines\")\\n main_process_ip = getattr(args, \"main_process_ip\")\\n@@ -644,6 +643,8 @@ def multi_gpu_launcher(args):\\n \\n \\n def deepspeed_launcher(args):\\n+ if is_torch_version(\">=\", \"1.9.0\"):\\n+ import torch.distributed.run as distrib_run\\n if not is_deepspeed_available():\\n raise ImportError(\"DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.\")\\n num_processes = getattr(args, \"num_processes\")\\ndiff --git a/src/accelerate/utils/launch.py b/src/accelerate/utils/launch.py\\nindex 8642a441a..c65df08dc 100644\\n--- a/src/accelerate/utils/launch.py\\n+++ b/src/accelerate/utils/launch.py\\n@@ -21,10 +21,6 @@\\n from .dataclasses import DistributedType\\n \\n \\n-if is_torch_version(\">=\", \"1.9.0\"):\\n- import torch.distributed.run as distrib_run\\n-\\n-\\n def get_launch_prefix():\\n \"\"\"\\n Grabs the correct launcher for starting a distributed command, such as either `torchrun`, `python -m\\n@@ -43,6 +39,8 @@ def _filter_args(args):\\n \"\"\"\\n Filters out all `accelerate` specific args\\n \"\"\"\\n+ if is_torch_version(\">=\", \"1.9.0\"):\\n+ import torch.distributed.run as distrib_run\\n distrib_args = distrib_run.get_args_parser()\\n new_args, _ = distrib_args.parse_known_args()\\n \\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_828). 
All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/828'},\n", - " 1114535307: {'diff': 'diff --git a/.github/workflows/build_pr_documentation.yml b/.github/workflows/build_pr_documentation.yml\\nindex dc56751c6..12c600cb7 100644\\n--- a/.github/workflows/build_pr_documentation.yml\\n+++ b/.github/workflows/build_pr_documentation.yml\\n@@ -9,8 +9,11 @@ concurrency:\\n \\n jobs:\\n build:\\n- uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main\\n+ uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@use_hf_hub\\n with:\\n commit_sha: ${{ github.event.pull_request.head.sha }}\\n pr_number: ${{ github.event.number }}\\n package: accelerate\\n+ secrets:\\n+ token: ${{ secrets.HF_DOC_PUSH }}\\n+ comment_bot_token: ${{ secrets.HUGGINGFACE_PUSH }}\\ndiff --git a/.github/workflows/delete_doc_comment.yml b/.github/workflows/delete_doc_comment.yml\\nindex da61d21df..973c2a8b3 100644\\n--- a/.github/workflows/delete_doc_comment.yml\\n+++ b/.github/workflows/delete_doc_comment.yml\\n@@ -7,7 +7,10 @@ on:\\n \\n jobs:\\n delete:\\n- uses: huggingface/doc-builder/.github/workflows/delete_doc_comment.yml@main\\n+ uses: huggingface/doc-builder/.github/workflows/delete_doc_comment.yml@use_hf_hub\\n with:\\n pr_number: ${{ github.event.number }}\\n package: accelerate\\n+ secrets:\\n+ token: ${{ secrets.HF_DOC_PUSH }}\\n+ comment_bot_token: ${{ secrets.HUGGINGFACE_PUSH }}\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_827). All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/827'},\n", - " 1113209635: {'diff': 'diff --git a/setup.py b/setup.py\\nindex 22d7cfb8d..b4708353c 100644\\n--- a/setup.py\\n+++ b/setup.py\\n@@ -19,7 +19,7 @@\\n extras[\"quality\"] = [\"black ~= 22.0\", \"isort >= 5.5.4\", \"flake8 >= 3.8.3\", \"hf-doc-builder >= 0.3.0\"]\\n extras[\"docs\"] = []\\n extras[\"test_prod\"] = [\"pytest\", \"pytest-xdist\", \"pytest-subtests\", \"parameterized\"]\\n-extras[\"test_dev\"] = [\"datasets\", \"evaluate\", \"transformers\", \"scipy\", \"sklearn\", \"deepspeed<0.7.0\", \"tqdm\"]\\n+extras[\"test_dev\"] = [\"datasets\", \"evaluate\", \"transformers\", \"scipy\", \"scikit-learn\", \"deepspeed<0.7.0\", \"tqdm\"]\\n extras[\"testing\"] = extras[\"test_prod\"] + extras[\"test_dev\"]\\n extras[\"rich\"] = [\"rich\"]\\n \\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/825'},\n", - " 1113202499: {'diff': 'diff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\\nindex acc4a1828..aa61a6857 100644\\n--- a/src/accelerate/commands/config/cluster.py\\n+++ b/src/accelerate/commands/config/cluster.py\\n@@ -385,11 +385,23 @@ def get_cluster_input():\\n )\\n command_file = os.path.abspath(command_file)\\n else:\\n- commands = _ask_field(\\n- \"What commands do you wish to run on startup in each pod? 
\",\\n- default=None,\\n- error_message=\"Please enter the commands you wish to run on startup in each pod as a single string.\",\\n- )\\n+ print(\"Please enter each command seperately you wish to run on startup in each pod.\")\\n+ commands = []\\n+ another_command = True\\n+ while another_command:\\n+ commands.append(\\n+ _ask_field(\\n+ \"Please enter a single command to be ran \",\\n+ default=None,\\n+ error_message=\"Please enter the commands you wish to run on startup in each pod as a single string.\",\\n+ )\\n+ )\\n+ another_command = _ask_field(\\n+ \"Do you wish to add another command? [yes/NO]: \",\\n+ _convert_yes_no_to_bool,\\n+ default=False,\\n+ error_message=\"Please enter yes or no.\",\\n+ )\\n \\n else:\\n main_training_function = \"main\"\\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\\nindex 9a1247c55..a5a841900 100644\\n--- a/src/accelerate/commands/config/config_args.py\\n+++ b/src/accelerate/commands/config/config_args.py\\n@@ -155,7 +155,7 @@ class ClusterConfig(BaseConfig):\\n tpu_name: str = None\\n tpu_zone: str = None\\n command_file: str = None\\n- command: List[str] = None\\n+ commands: List[str] = None\\n \\n def __post_init__(self):\\n if self.deepspeed_config is None:\\ndiff --git a/src/accelerate/commands/tpu.py b/src/accelerate/commands/tpu.py\\nindex 59bbb08e9..6b90770c7 100644\\n--- a/src/accelerate/commands/tpu.py\\n+++ b/src/accelerate/commands/tpu.py\\n@@ -89,8 +89,8 @@ def tpu_command_launcher(args):\\n defaults = load_config_from_file(args.config_file)\\n if not args.command_file and defaults.command_file is not None and not args.command:\\n args.command_file = defaults.command_file\\n- if not args.command and defaults.command is not None:\\n- args.command = defaults.command\\n+ if not args.command and defaults.commands is not None:\\n+ args.command = defaults.commands\\n if not args.tpu_name:\\n args.tpu_name = defaults.tpu_name\\n if not args.tpu_zone:\\n@@ -110,7 +110,8 @@ def tpu_command_launcher(args):\\n args.command = [f.read().splitlines()]\\n \\n # To turn list of lists into list of strings\\n- args.command = [line for cmd in args.command for line in cmd]\\n+ if isinstance(args.command[0], list):\\n+ args.command = [line for cmd in args.command for line in cmd]\\n # Default to the shared folder and install accelerate\\n new_cmd = [\"cd /usr/share\"]\\n if args.install_accelerate:\\ndiff --git a/tests/test_configs/latest.yaml b/tests/test_configs/latest.yaml\\nindex de6be03a4..456348ef9 100644\\n--- a/tests/test_configs/latest.yaml\\n+++ b/tests/test_configs/latest.yaml\\n@@ -17,5 +17,5 @@ same_network: true\\n use_cpu: false\\n tpu_name: \\'test-tpu\\'\\n tpu_zone: \\'us-central1-a\\'\\n-command: null\\n+commands: null\\n command_file: tests/test_samples/test_command_file.sh\\n\\\\ No newline at end of file\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': '@sgugger fails are unrelated, see https://github.com/huggingface/accelerate/pull/825',\n", - " 'from_author': True}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/824'},\n", - " 1112992023: {'diff': \"diff --git a/docs/source/usage_guides/tracking.mdx b/docs/source/usage_guides/tracking.mdx\\nindex cc5c17418..b22a4dd14 100644\\n--- a/docs/source/usage_guides/tracking.mdx\\n+++ b/docs/source/usage_guides/tracking.mdx\\n@@ -17,11 +17,12 @@ There are a large 
number of experiment tracking API's available, however getting\\n \\n ## Integrated Trackers\\n \\n-Currently `Accelerate` supports three trackers out-of-the-box:\\n+Currently `Accelerate` supports four trackers out-of-the-box:\\n \\n - TensorBoard\\n - WandB\\n - CometML\\n+- MLFlow\\n \\n To use any of them, pass in the selected type(s) to the `log_with` parameter in [`Accelerate`]:\\n ```python\\ndiff --git a/docs/source/usage_guides/training_zoo.mdx b/docs/source/usage_guides/training_zoo.mdx\\nindex 444784b49..176d97d2b 100644\\n--- a/docs/source/usage_guides/training_zoo.mdx\\n+++ b/docs/source/usage_guides/training_zoo.mdx\\n@@ -21,7 +21,10 @@ Below contains a non-exhuastive list of tutorials and scripts showcasing Acceler\\n These examples showcase the base features of Accelerate and are a great starting point\\n \\n - [Barebones NLP example](https://github.com/huggingface/accelerate/blob/main/examples/nlp_example.py)\\n+- [Barebones distributed NLP example in a Jupyter Notebook](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb)\\n - [Barebones computer vision example](https://github.com/huggingface/accelerate/blob/main/examples/cv_example.py)\\n+- [Barebones distributed computer vision example in a Jupyter Notebook](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_cv_example.ipynb)\\n+- [Using Accelerate in Kaggle](https://www.kaggle.com/code/muellerzr/multi-gpu-and-accelerate)\\n \\n ### Feature Specific Examples\\n \\n@@ -101,6 +104,10 @@ These are tutorials from libraries that integrate with 🤗 Accelerate:\\n \\n - [Perform Deep Learning with 3D data](https://pytorch3d.org/tutorials/)\\n \\n+### Stable-Dreamfusion\\n+\\n+- [Training with Stable-Dreamfusion to convert text to a 3D model](https://colab.research.google.com/drive/1MXT3yfOFvO0ooKEfiUUvTKwUkrrlCHpF?usp=sharing)\\n+\\n ### Tez \\n \\n - [Leaf disease detection with Tez and Accelerate](https://www.kaggle.com/code/abhishek/tez-faster-and-easier-training-for-leaf-detection/notebook)\\n\",\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/823'},\n", - " 1112054741: {'diff': 'diff --git a/examples/by_feature/deepspeed_with_config_support.py b/examples/by_feature/deepspeed_with_config_support.py\\nindex 36ace84b9..0fdff58ae 100755\\n--- a/examples/by_feature/deepspeed_with_config_support.py\\n+++ b/examples/by_feature/deepspeed_with_config_support.py\\n@@ -285,10 +285,9 @@ def evaluate(args, model, eval_dataloader, accelerator, eval_dataset):\\n outputs = model(**batch)\\n \\n loss = outputs.loss\\n- losses.append(accelerator.gather(loss.repeat(args.per_device_eval_batch_size)))\\n+ losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size)))\\n \\n losses = torch.cat(losses)\\n- losses = losses[: len(eval_dataset)]\\n try:\\n eval_loss = torch.mean(losses)\\n perplexity = math.exp(eval_loss)\\n',\n", - " 'code_comments': [{'body': 'This line (291) becomes redundant when using `gather_for_metrics`',\n", - " 'diff_hunk': '@@ -285,7 +285,7 @@ def evaluate(args, model, eval_dataloader, accelerator, eval_dataset):\\n outputs = model(**batch)\\n \\n loss = outputs.loss\\n- losses.append(accelerator.gather(loss.repeat(args.per_device_eval_batch_size)))\\n+ 
losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size)))\\n \\n losses = torch.cat(losses)\\n losses = losses[: len(eval_dataset)]',\n", - " 'from_author': False},\n", - " {'body': 'Ah yeah, makes sense thank you!',\n", - " 'diff_hunk': '@@ -285,7 +285,7 @@ def evaluate(args, model, eval_dataloader, accelerator, eval_dataset):\\n outputs = model(**batch)\\n \\n loss = outputs.loss\\n- losses.append(accelerator.gather(loss.repeat(args.per_device_eval_batch_size)))\\n+ losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size)))\\n \\n losses = torch.cat(losses)\\n losses = losses[: len(eval_dataset)]',\n", - " 'from_author': True}],\n", - " 'context': [{'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_821). All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/821'},\n", - " 1110563062: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex 63ea4a71b..e2248fcfa 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -25,7 +25,7 @@\\n import torch\\n \\n from .checkpointing import load_accelerator_state, load_custom_state, save_accelerator_state, save_custom_state\\n-from .data_loader import prepare_data_loader\\n+from .data_loader import DataLoaderDispatcher, prepare_data_loader\\n from .logging import get_logger\\n from .optimizer import AcceleratedOptimizer\\n from .scheduler import AcceleratedScheduler\\n@@ -87,6 +87,9 @@\\n megatron_lm_prepare_scheduler,\\n )\\n \\n+if is_torch_version(\">\", \"1.10.0\"):\\n+ from torch.distributed.algorithms.join import Join\\n+\\n \\n if is_tpu_available(check_device=False):\\n import torch_xla.distributed.xla_multiprocessing as xmp\\n@@ -353,6 +356,7 @@ def __init__(\\n self._optimizers = []\\n self._models = []\\n self._schedulers = []\\n+ self._dataloaders = []\\n self._custom_objects = []\\n \\n # RNG Types\\n@@ -608,6 +612,93 @@ def accumulate(self, model):\\n with context(model):\\n yield\\n \\n+ @contextmanager\\n+ def join_uneven_inputs(self, joinables, even_batches=None):\\n+ \"\"\"\\n+ A context manager that facilitates distributed training or evaluation on uneven inputs, which acts as a wrapper\\n+ around `torch.distributed.algorithms.join`. This is useful when the total batch size does not evenly divide the\\n+ length of the dataset.\\n+\\n+ Args:\\n+ joinables (`List[torch.distributed.algorithms.Joinable]`):\\n+ A list of models or optimizers that subclass `torch.distributed.algorithms.Joinable`. Most commonly, a\\n+ PyTorch Module that was prepared with `Accelerator.prepare` for DistributedDataParallel training.\\n+ even_batches (`bool`, *optional*)\\n+ If set, this will override the value of `even_batches` set in the `Accelerator`. If it is not provided,\\n+ the default `Accelerator` value wil be used.\\n+\\n+ \\n+\\n+ `join_uneven_inputs` is only supported for Distributed Data Parallel training on multiple GPUs. 
For any other\\n+ configuration, this method will have no effect.\\n+\\n+ \\n+\\n+ \\n+\\n+ Overidding `even_batches` will not affect iterable-style data loaders.\\n+\\n+ \\n+\\n+ Example:\\n+\\n+ ```python\\n+ >>> from accelerate import Accelerator\\n+\\n+ >>> accelerator = Accelerator(even_batches=True)\\n+ >>> ddp_model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)\\n+\\n+ >>> with accelerator.join_uneven_inputs([ddp_model], even_batches=False):\\n+ ... for input, output in dataloader:\\n+ ... outputs = model(input)\\n+ ... loss = loss_func(outputs)\\n+ ... loss.backward()\\n+ ... optimizer.step()\\n+ ... optimizer.zero_grad()\\n+ ```\\n+\\n+ \"\"\"\\n+ if is_torch_version(\"<\", \"1.10.0\"):\\n+ raise ValueError(f\"Joining uneven inputs requires PyTorch >= 1.10.0, You have {torch.__version__}.\")\\n+\\n+ if self.distributed_type == DistributedType.MULTI_GPU:\\n+ dl_even_batches_values = []\\n+\\n+ if even_batches is not None:\\n+ iterable_dl_seen = False\\n+ # override value in batch sampler for map-style datasets\\n+ for dl_idx, dl in enumerate(self._dataloaders):\\n+ if isinstance(dl, DataLoaderDispatcher):\\n+ iterable_dl_seen = True\\n+ continue\\n+ dl_even_batches_values.append((dl_idx, dl.batch_sampler.even_batches))\\n+ dl.batch_sampler.even_batches = even_batches\\n+\\n+ if iterable_dl_seen:\\n+ warnings.warn(\\n+ \"Overridding even_batches is only supported for map-style datasets, yet some dataloaders given were iterable\"\\n+ )\\n+ else:\\n+ even_batches = self.even_batches\\n+\\n+ enable_join = False if even_batches else True\\n+ try:\\n+ with Join(joinables, enable=enable_join, throw_on_early_termination=False):\\n+ yield\\n+ finally:\\n+ # reset any batch samplers that have been modified\\n+ for dl_idx, even_batches_value in dl_even_batches_values:\\n+ self._dataloaders[dl_idx].batch_sampler.even_batches = even_batches_value\\n+ else:\\n+ # Even when disabled, Join expects models to subclass Joinable, so skip entirely for single process runs\\n+ if self.distributed_type != DistributedType.NO:\\n+ warnings.warn(\\n+ \"Joining uneven inputs is only supported for multi-GPU training, as a result `join_uneven_inputs` will have no effect.\"\\n+ )\\n+\\n+ with contextlib.nullcontext(joinables):\\n+ yield\\n+\\n def print(self, *args, **kwargs):\\n \"\"\"\\n Use in replacement of `print()` to only print once per server.\\n@@ -1117,7 +1208,7 @@ def prepare_data_loader(self, data_loader: torch.utils.data.DataLoader, device_p\\n \"\"\"\\n if device_placement is None:\\n device_placement = self.device_placement if self.distributed_type != DistributedType.TPU else False\\n- return prepare_data_loader(\\n+ prepared_data_loader = prepare_data_loader(\\n data_loader,\\n self.device,\\n num_processes=self.num_processes,\\n@@ -1128,6 +1219,8 @@ def prepare_data_loader(self, data_loader: torch.utils.data.DataLoader, device_p\\n dispatch_batches=self.dispatch_batches,\\n even_batches=self.even_batches,\\n )\\n+ self._dataloaders.append(prepared_data_loader)\\n+ return prepared_data_loader\\n \\n def prepare_optimizer(self, optimizer: torch.optim.Optimizer, device_placement=None):\\n \"\"\"\\n@@ -1611,6 +1704,7 @@ def free_memory(self):\\n self._schedulers = []\\n self._optimizers = []\\n self._models = []\\n+ self._dataloaders = []\\n self.deepspeed_engine_wrapped = None\\n gc.collect()\\n torch.cuda.empty_cache()\\ndiff --git a/src/accelerate/test_utils/scripts/test_distributed_data_loop.py b/src/accelerate/test_utils/scripts/test_distributed_data_loop.py\\nindex 
eaf7c6a34..6576e0358 100644\\n--- a/src/accelerate/test_utils/scripts/test_distributed_data_loop.py\\n+++ b/src/accelerate/test_utils/scripts/test_distributed_data_loop.py\\n@@ -14,12 +14,25 @@\\n # See the License for the specific language governing permissions and\\n # limitations under the License.\\n \\n+\\n+import warnings\\n from typing import List\\n+from unittest.mock import Mock\\n \\n import torch\\n-from torch.utils.data import DataLoader, TensorDataset\\n+from torch.utils.data import DataLoader, IterableDataset, TensorDataset\\n \\n from accelerate.accelerator import Accelerator\\n+from accelerate.utils.dataclasses import DistributedType\\n+\\n+\\n+class DummyIterableDataset(IterableDataset):\\n+ def __init__(self, data):\\n+ self.data = data\\n+\\n+ def __iter__(self):\\n+ for element in self.data:\\n+ yield element\\n \\n \\n def create_accelerator(even_batches=True):\\n@@ -28,11 +41,14 @@ def create_accelerator(even_batches=True):\\n return accelerator\\n \\n \\n-def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int):\\n+def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):\\n \"\"\"\\n Create a simple DataLoader to use during the test cases\\n \"\"\"\\n- dataset = TensorDataset(torch.as_tensor(range(dataset_size)))\\n+ if iterable:\\n+ dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))\\n+ else:\\n+ dataset = TensorDataset(torch.as_tensor(range(dataset_size)))\\n \\n dl = DataLoader(dataset, batch_size=batch_size)\\n dl = accelerator.prepare(dl)\\n@@ -103,7 +119,96 @@ def test_can_disable_even_batches():\\n )\\n \\n \\n-if __name__ == \"__main__\":\\n+def test_can_join_uneven_inputs():\\n+ accelerator = create_accelerator(even_batches=False)\\n+\\n+ model = torch.nn.Linear(1, 1)\\n+ ddp_model = accelerator.prepare(model)\\n+\\n+ dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)\\n+\\n+ batch_idxs = []\\n+ with accelerator.join_uneven_inputs([ddp_model]):\\n+ for batch_idx, batch in enumerate(dl):\\n+ output = ddp_model(batch[0].float())\\n+ loss = output.sum()\\n+ loss.backward()\\n+ batch_idxs.append(batch_idx)\\n+\\n+ accelerator.wait_for_everyone()\\n+\\n+ if accelerator.process_index == 0:\\n+ assert batch_idxs == [0, 1]\\n+ elif accelerator.process_index == 1:\\n+ assert batch_idxs == [0]\\n+\\n+\\n+def test_join_raises_warning_for_non_ddp_distributed(accelerator):\\n+\\n+ with warnings.catch_warnings(record=True) as w:\\n+ with accelerator.join_uneven_inputs([Mock()]):\\n+ pass\\n+\\n+ assert issubclass(w[-1].category, UserWarning)\\n+ assert \"only supported for multi-GPU\" in str(w[-1].message)\\n+\\n+\\n+def test_join_can_override_even_batches():\\n+ default_even_batches = True\\n+ overridden_even_batches = False\\n+ accelerator = create_accelerator(even_batches=default_even_batches)\\n+ model = torch.nn.Linear(1, 1)\\n+ ddp_model = accelerator.prepare(model)\\n+ train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)\\n+ valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)\\n+\\n+ with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):\\n+ train_dl_overridden_value = train_dl.batch_sampler.even_batches\\n+ valid_dl_overridden_value = valid_dl.batch_sampler.even_batches\\n+\\n+ assert train_dl_overridden_value == overridden_even_batches\\n+ assert valid_dl_overridden_value == overridden_even_batches\\n+ assert train_dl.batch_sampler.even_batches == default_even_batches\\n+ assert 
valid_dl.batch_sampler.even_batches == default_even_batches\\n+\\n+\\n+def test_join_can_override_for_mixed_type_dataloaders():\\n+ default_even_batches = True\\n+ overridden_even_batches = False\\n+ accelerator = create_accelerator(even_batches=default_even_batches)\\n+ model = torch.nn.Linear(1, 1)\\n+ ddp_model = accelerator.prepare(model)\\n+ create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)\\n+ batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)\\n+\\n+ with warnings.catch_warnings():\\n+ warnings.filterwarnings(\"ignore\")\\n+ try:\\n+ with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):\\n+ batch_dl_overridden_value = batch_dl.batch_sampler.even_batches\\n+ except AttributeError:\\n+ # ensure attribute error is not raised when processing iterable dl\\n+ raise AssertionError\\n+\\n+ assert batch_dl_overridden_value == overridden_even_batches\\n+ assert batch_dl.batch_sampler.even_batches == default_even_batches\\n+\\n+\\n+def test_join_raises_warning_for_iterable_when_overriding_even_batches():\\n+ accelerator = create_accelerator()\\n+ model = torch.nn.Linear(1, 1)\\n+ ddp_model = accelerator.prepare(model)\\n+ create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)\\n+\\n+ with warnings.catch_warnings(record=True) as w:\\n+ with accelerator.join_uneven_inputs([ddp_model], even_batches=False):\\n+ pass\\n+\\n+ assert issubclass(w[-1].category, UserWarning)\\n+ assert \"only supported for map-style datasets\" in str(w[-1].message)\\n+\\n+\\n+def main():\\n accelerator = create_accelerator()\\n \\n accelerator.print(\"Test that even_batches variable ensures uniform batches across processes\")\\n@@ -111,3 +216,25 @@ def test_can_disable_even_batches():\\n \\n accelerator.print(\"Run tests with even_batches disabled\")\\n test_can_disable_even_batches()\\n+\\n+ accelerator.print(\"Test joining uneven inputs\")\\n+ test_can_join_uneven_inputs()\\n+\\n+ accelerator.print(\"Test overriding even_batches when joining uneven inputs\")\\n+ test_join_can_override_even_batches()\\n+\\n+ accelerator.print(\"Test overriding even_batches for mixed dataloader types\")\\n+ test_join_can_override_for_mixed_type_dataloaders()\\n+\\n+ accelerator.print(\"Test overriding even_batches raises a warning for iterable dataloaders\")\\n+ test_join_raises_warning_for_iterable_when_overriding_even_batches()\\n+\\n+ accelerator.print(\"Test join with non DDP distributed raises warning\")\\n+ original_state = accelerator.state.distributed_type\\n+ accelerator.state.distributed_type = DistributedType.FSDP\\n+ test_join_raises_warning_for_non_ddp_distributed(accelerator)\\n+ accelerator.state.distributed_type = original_state\\n+\\n+\\n+if __name__ == \"__main__\":\\n+ main()\\ndiff --git a/tests/test_accelerator.py b/tests/test_accelerator.py\\nnew file mode 100644\\nindex 000000000..19d6c1655\\n--- /dev/null\\n+++ b/tests/test_accelerator.py\\n@@ -0,0 +1,51 @@\\n+import unittest\\n+\\n+import torch\\n+from torch.utils.data import DataLoader, TensorDataset\\n+\\n+from accelerate.accelerator import Accelerator\\n+from accelerate.state import AcceleratorState\\n+\\n+\\n+def create_components():\\n+ model = torch.nn.Linear(2, 4)\\n+ optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)\\n+ scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)\\n+ train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))\\n+ valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 
6])))\\n+\\n+ return model, optimizer, scheduler, train_dl, valid_dl\\n+\\n+\\n+class AcceleratorTester(unittest.TestCase):\\n+ def test_prepared_objects_are_referenced(self):\\n+ accelerator = Accelerator()\\n+ model, optimizer, scheduler, train_dl, valid_dl = create_components()\\n+\\n+ (\\n+ prepared_model,\\n+ prepared_optimizer,\\n+ prepared_scheduler,\\n+ prepared_train_dl,\\n+ prepared_valid_dl,\\n+ ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)\\n+\\n+ self.assertTrue(prepared_model in accelerator._models)\\n+ self.assertTrue(prepared_optimizer in accelerator._optimizers)\\n+ self.assertTrue(prepared_scheduler in accelerator._schedulers)\\n+ self.assertTrue(prepared_train_dl in accelerator._dataloaders)\\n+ self.assertTrue(prepared_valid_dl in accelerator._dataloaders)\\n+ AcceleratorState._reset_state()\\n+\\n+ def test_free_memory_dereferences_prepared_components(self):\\n+ accelerator = Accelerator()\\n+ model, optimizer, scheduler, train_dl, valid_dl = create_components()\\n+ accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)\\n+\\n+ accelerator.free_memory()\\n+\\n+ self.assertTrue(len(accelerator._models) == 0)\\n+ self.assertTrue(len(accelerator._optimizers) == 0)\\n+ self.assertTrue(len(accelerator._schedulers) == 0)\\n+ self.assertTrue(len(accelerator._dataloaders) == 0)\\n+ AcceleratorState._reset_state()\\n',\n", - " 'code_comments': [{'body': 'Maybe only throw an error if there are no dataloader with that attribute? I can imagine having an iterable dataset for training but a standard dataset for evaluation.',\n", - " 'diff_hunk': '@@ -608,6 +612,41 @@ def accumulate(self, model):\\n with context(model):\\n yield\\n \\n+ @contextmanager\\n+ def join_uneven_inputs(self, joinables, even_batches=None):\\n+ \"\"\"\\n+ A context manager that facilitates distributed training on uneven inputs, which acts as a wrapper around `torch.distributed.algorithms.join`.\\n+ \"\"\"\\n+ if is_torch_version(\"<\", \"1.10.0\"):\\n+ raise ValueError(f\"Joining uneven inputs requires PyTorch >= 1.10.0, You have {torch.__version__}.\")\\n+\\n+ if self.distributed_type == DistributedType.NO:\\n+ # Even when disabled, Join expects models to subclass Joinable, so skip entirely for single process runs\\n+ with contextlib.nullcontext(joinables):\\n+ yield\\n+\\n+ elif self.distributed_type == DistributedType.MULTI_GPU:\\n+ dl_even_batches_values = []\\n+\\n+ if even_batches is not None:\\n+ for dl in self._dataloaders:\\n+ if not hasattr(dl, \"even_batches\"):\\n+ raise ValueError(\"Overridding even_batches is not supported for iterable-style datasets\")',\n", - " 'from_author': False},\n", - " {'body': \"I'd just issue a warning and exit.\",\n", - " 'diff_hunk': '@@ -608,6 +612,41 @@ def accumulate(self, model):\\n with context(model):\\n yield\\n \\n+ @contextmanager\\n+ def join_uneven_inputs(self, joinables, even_batches=None):\\n+ \"\"\"\\n+ A context manager that facilitates distributed training on uneven inputs, which acts as a wrapper around `torch.distributed.algorithms.join`.\\n+ \"\"\"\\n+ if is_torch_version(\"<\", \"1.10.0\"):\\n+ raise ValueError(f\"Joining uneven inputs requires PyTorch >= 1.10.0, You have {torch.__version__}.\")\\n+\\n+ if self.distributed_type == DistributedType.NO:\\n+ # Even when disabled, Join expects models to subclass Joinable, so skip entirely for single process runs\\n+ with contextlib.nullcontext(joinables):\\n+ yield\\n+\\n+ elif self.distributed_type == DistributedType.MULTI_GPU:\\n+ dl_even_batches_values = 
[]\\n+\\n+ if even_batches is not None:\\n+ for dl in self._dataloaders:\\n+ if not hasattr(dl, \"even_batches\"):\\n+ raise ValueError(\"Overridding even_batches is not supported for iterable-style datasets\")\\n+ dl_even_batches_values.append(dl.even_batches)\\n+ dl.even_batches = even_batches\\n+ else:\\n+ even_batches = self.even_batches\\n+\\n+ enable_join = False if even_batches else True\\n+ try:\\n+ with Join(joinables, enable=enable_join, throw_on_early_termination=False):\\n+ yield\\n+ finally:\\n+ for dl, even_batches_value in zip(self._dataloaders, dl_even_batches_values):\\n+ dl.even_batches = even_batches_value\\n+ else:\\n+ raise ValueError(\"Joining uneven inputs is only supported for DistributedDataParallel training\")',\n", - " 'from_author': False},\n", - " {'body': 'This test will need two processes, so probably needs some decorator/special launcher.',\n", - " 'diff_hunk': '@@ -103,11 +117,78 @@ def test_can_disable_even_batches():\\n )\\n \\n \\n-if __name__ == \"__main__\":\\n+def test_can_join_uneven_inputs():\\n+ accelerator = create_accelerator(even_batches=False)\\n+\\n+ model = torch.nn.Linear(1, 1)\\n+ ddp_model = accelerator.prepare(model)\\n+\\n+ dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)\\n+\\n+ batch_idxs = []\\n+ with accelerator.join_uneven_inputs([ddp_model]):\\n+ for batch_idx, batch in enumerate(dl):\\n+ output = ddp_model(batch[0].float())\\n+ loss = output.sum()\\n+ loss.backward()\\n+ batch_idxs.append(batch_idx)\\n+\\n+ accelerator.wait_for_everyone()\\n+\\n+ if accelerator.process_index == 0:',\n", - " 'from_author': False},\n", - " {'body': 'You are correct. I think that this should be taken care of here, from my previous PR: https://github.com/huggingface/accelerate/blob/main/tests/test_multigpu.py#L49',\n", - " 'diff_hunk': '@@ -103,11 +117,78 @@ def test_can_disable_even_batches():\\n )\\n \\n \\n-if __name__ == \"__main__\":\\n+def test_can_join_uneven_inputs():\\n+ accelerator = create_accelerator(even_batches=False)\\n+\\n+ model = torch.nn.Linear(1, 1)\\n+ ddp_model = accelerator.prepare(model)\\n+\\n+ dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)\\n+\\n+ batch_idxs = []\\n+ with accelerator.join_uneven_inputs([ddp_model]):\\n+ for batch_idx, batch in enumerate(dl):\\n+ output = ddp_model(batch[0].float())\\n+ loss = output.sum()\\n+ loss.backward()\\n+ batch_idxs.append(batch_idx)\\n+\\n+ accelerator.wait_for_everyone()\\n+\\n+ if accelerator.process_index == 0:',\n", - " 'from_author': True},\n", - " {'body': 'Good spot. 
Based on the other feedback, I have just raised a warning for this rather than an exception',\n", - " 'diff_hunk': '@@ -608,6 +612,41 @@ def accumulate(self, model):\\n with context(model):\\n yield\\n \\n+ @contextmanager\\n+ def join_uneven_inputs(self, joinables, even_batches=None):\\n+ \"\"\"\\n+ A context manager that facilitates distributed training on uneven inputs, which acts as a wrapper around `torch.distributed.algorithms.join`.\\n+ \"\"\"\\n+ if is_torch_version(\"<\", \"1.10.0\"):\\n+ raise ValueError(f\"Joining uneven inputs requires PyTorch >= 1.10.0, You have {torch.__version__}.\")\\n+\\n+ if self.distributed_type == DistributedType.NO:\\n+ # Even when disabled, Join expects models to subclass Joinable, so skip entirely for single process runs\\n+ with contextlib.nullcontext(joinables):\\n+ yield\\n+\\n+ elif self.distributed_type == DistributedType.MULTI_GPU:\\n+ dl_even_batches_values = []\\n+\\n+ if even_batches is not None:\\n+ for dl in self._dataloaders:\\n+ if not hasattr(dl, \"even_batches\"):\\n+ raise ValueError(\"Overridding even_batches is not supported for iterable-style datasets\")',\n", - " 'from_author': True},\n", - " {'body': 'Done',\n", - " 'diff_hunk': '@@ -608,6 +612,41 @@ def accumulate(self, model):\\n with context(model):\\n yield\\n \\n+ @contextmanager\\n+ def join_uneven_inputs(self, joinables, even_batches=None):\\n+ \"\"\"\\n+ A context manager that facilitates distributed training on uneven inputs, which acts as a wrapper around `torch.distributed.algorithms.join`.\\n+ \"\"\"\\n+ if is_torch_version(\"<\", \"1.10.0\"):\\n+ raise ValueError(f\"Joining uneven inputs requires PyTorch >= 1.10.0, You have {torch.__version__}.\")\\n+\\n+ if self.distributed_type == DistributedType.NO:\\n+ # Even when disabled, Join expects models to subclass Joinable, so skip entirely for single process runs\\n+ with contextlib.nullcontext(joinables):\\n+ yield\\n+\\n+ elif self.distributed_type == DistributedType.MULTI_GPU:\\n+ dl_even_batches_values = []\\n+\\n+ if even_batches is not None:\\n+ for dl in self._dataloaders:\\n+ if not hasattr(dl, \"even_batches\"):\\n+ raise ValueError(\"Overridding even_batches is not supported for iterable-style datasets\")\\n+ dl_even_batches_values.append(dl.even_batches)\\n+ dl.even_batches = even_batches\\n+ else:\\n+ even_batches = self.even_batches\\n+\\n+ enable_join = False if even_batches else True\\n+ try:\\n+ with Join(joinables, enable=enable_join, throw_on_early_termination=False):\\n+ yield\\n+ finally:\\n+ for dl, even_batches_value in zip(self._dataloaders, dl_even_batches_values):\\n+ dl.even_batches = even_batches_value\\n+ else:\\n+ raise ValueError(\"Joining uneven inputs is only supported for DistributedDataParallel training\")',\n", - " 'from_author': True},\n", - " {'body': \"Let's add a warning here maybe? Longterm, `DataLoaderDispatcher` will also support the `even_batches` kwarg.\",\n", - " 'diff_hunk': '@@ -608,6 +612,91 @@ def accumulate(self, model):\\n with context(model):\\n yield\\n \\n+ @contextmanager\\n+ def join_uneven_inputs(self, joinables, even_batches=None):\\n+ \"\"\"\\n+ A context manager that facilitates distributed training or evaluation on uneven inputs, which acts as a wrapper\\n+ around `torch.distributed.algorithms.join`. This is useful when the total batch size does not evenly divide the\\n+ length of the dataset.\\n+\\n+ Args:\\n+ joinables (`List[torch.distributed.algorithms.Joinable]`):\\n+ A list of models or optimizers that subclass `torch.distributed.algorithms.Joinable`. 
Most commonly, a\\n+ PyTorch Module that was prepared with `Accelerator.prepare` for DistributedDataParallel training.\\n+ even_batches (`bool`, *optional*)\\n+ If set, this will override the value of `even_batches` set in the `Accelerator`. If it is not provided,\\n+ the default `Accelerator` value wil be used.\\n+\\n+ \\n+\\n+ `join_uneven_inputs` is only supported for Distributed Data Parallel training on multiple GPUs. For any other\\n+ configuration, this method will have no effect.\\n+\\n+ \\n+\\n+ \\n+\\n+ Overidding `even_batches` will not affect iterable-style data loaders.\\n+\\n+ \\n+\\n+ Example:\\n+\\n+ ```python\\n+ >>> from accelerate import Accelerator\\n+\\n+ >>> accelerator = Accelerator(even_batches=True)\\n+ >>> ddp_model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)\\n+\\n+ >>> with accelerator.join_uneven_inputs([ddp_model], even_batches=False):\\n+ ... for input, output in dataloader:\\n+ ... outputs = model(input)\\n+ ... loss = loss_func(outputs)\\n+ ... loss.backward()\\n+ ... optimizer.step()\\n+ ... optimizer.zero_grad()\\n+ ```\\n+\\n+ \"\"\"\\n+ if is_torch_version(\"<\", \"1.10.0\"):\\n+ raise ValueError(f\"Joining uneven inputs requires PyTorch >= 1.10.0, You have {torch.__version__}.\")\\n+\\n+ if self.distributed_type == DistributedType.MULTI_GPU:\\n+ dl_even_batches_values = []\\n+\\n+ if even_batches is not None:\\n+ # override value in batch sampler for map-style datasets\\n+ for dl_idx, dl in enumerate(self._dataloaders):\\n+ if isinstance(dl, DataLoaderDispatcher):\\n+ continue',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\n \"Joining uneven inputs is only supported for multi-GPU training, as a result `join_uneven_inputs` will have no effect.\"\\r\\n```',\n", - " 'diff_hunk': '@@ -608,6 +612,91 @@ def accumulate(self, model):\\n with context(model):\\n yield\\n \\n+ @contextmanager\\n+ def join_uneven_inputs(self, joinables, even_batches=None):\\n+ \"\"\"\\n+ A context manager that facilitates distributed training or evaluation on uneven inputs, which acts as a wrapper\\n+ around `torch.distributed.algorithms.join`. This is useful when the total batch size does not evenly divide the\\n+ length of the dataset.\\n+\\n+ Args:\\n+ joinables (`List[torch.distributed.algorithms.Joinable]`):\\n+ A list of models or optimizers that subclass `torch.distributed.algorithms.Joinable`. Most commonly, a\\n+ PyTorch Module that was prepared with `Accelerator.prepare` for DistributedDataParallel training.\\n+ even_batches (`bool`, *optional*)\\n+ If set, this will override the value of `even_batches` set in the `Accelerator`. If it is not provided,\\n+ the default `Accelerator` value wil be used.\\n+\\n+ \\n+\\n+ `join_uneven_inputs` is only supported for Distributed Data Parallel training on multiple GPUs. For any other\\n+ configuration, this method will have no effect.\\n+\\n+ \\n+\\n+ \\n+\\n+ Overidding `even_batches` will not affect iterable-style data loaders.\\n+\\n+ \\n+\\n+ Example:\\n+\\n+ ```python\\n+ >>> from accelerate import Accelerator\\n+\\n+ >>> accelerator = Accelerator(even_batches=True)\\n+ >>> ddp_model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)\\n+\\n+ >>> with accelerator.join_uneven_inputs([ddp_model], even_batches=False):\\n+ ... for input, output in dataloader:\\n+ ... outputs = model(input)\\n+ ... loss = loss_func(outputs)\\n+ ... loss.backward()\\n+ ... optimizer.step()\\n+ ... 
optimizer.zero_grad()\\n+ ```\\n+\\n+ \"\"\"\\n+ if is_torch_version(\"<\", \"1.10.0\"):\\n+ raise ValueError(f\"Joining uneven inputs requires PyTorch >= 1.10.0, You have {torch.__version__}.\")\\n+\\n+ if self.distributed_type == DistributedType.MULTI_GPU:\\n+ dl_even_batches_values = []\\n+\\n+ if even_batches is not None:\\n+ # override value in batch sampler for map-style datasets\\n+ for dl_idx, dl in enumerate(self._dataloaders):\\n+ if isinstance(dl, DataLoaderDispatcher):\\n+ continue\\n+ dl_even_batches_values.append((dl_idx, dl.batch_sampler.even_batches))\\n+ dl.batch_sampler.even_batches = even_batches\\n+\\n+ if len(dl_even_batches_values) == 0:\\n+ warnings.warn(\\n+ \"Overridding even_batches is only supported for map-style datasets, yet all dataloaders given were iterable\"\\n+ )\\n+ else:\\n+ even_batches = self.even_batches\\n+\\n+ enable_join = False if even_batches else True\\n+ try:\\n+ with Join(joinables, enable=enable_join, throw_on_early_termination=False):\\n+ yield\\n+ finally:\\n+ # reset any batch samplers that have been modified\\n+ for dl_idx, even_batches_value in dl_even_batches_values:\\n+ self._dataloaders[dl_idx].batch_sampler.even_batches = even_batches_value\\n+ else:\\n+ # Even when disabled, Join expects models to subclass Joinable, so skip entirely for single process runs\\n+ if self.distributed_type != DistributedType.NO:\\n+ warnings.warn(\\n+ \"Joining uneven inputs is only supported for DistributedDataParallel training, join_unenven_inputs has no effect.\"',\n", - " 'from_author': False},\n", - " {'body': 'Sure, I could change the condition on the warning that occurs if only `DataLoaderDispacher`s are provided, so that it warns if at least `DataLoaderDispacher` is seen. ',\n", - " 'diff_hunk': '@@ -608,6 +612,91 @@ def accumulate(self, model):\\n with context(model):\\n yield\\n \\n+ @contextmanager\\n+ def join_uneven_inputs(self, joinables, even_batches=None):\\n+ \"\"\"\\n+ A context manager that facilitates distributed training or evaluation on uneven inputs, which acts as a wrapper\\n+ around `torch.distributed.algorithms.join`. This is useful when the total batch size does not evenly divide the\\n+ length of the dataset.\\n+\\n+ Args:\\n+ joinables (`List[torch.distributed.algorithms.Joinable]`):\\n+ A list of models or optimizers that subclass `torch.distributed.algorithms.Joinable`. Most commonly, a\\n+ PyTorch Module that was prepared with `Accelerator.prepare` for DistributedDataParallel training.\\n+ even_batches (`bool`, *optional*)\\n+ If set, this will override the value of `even_batches` set in the `Accelerator`. If it is not provided,\\n+ the default `Accelerator` value wil be used.\\n+\\n+ \\n+\\n+ `join_uneven_inputs` is only supported for Distributed Data Parallel training on multiple GPUs. For any other\\n+ configuration, this method will have no effect.\\n+\\n+ \\n+\\n+ \\n+\\n+ Overidding `even_batches` will not affect iterable-style data loaders.\\n+\\n+ \\n+\\n+ Example:\\n+\\n+ ```python\\n+ >>> from accelerate import Accelerator\\n+\\n+ >>> accelerator = Accelerator(even_batches=True)\\n+ >>> ddp_model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)\\n+\\n+ >>> with accelerator.join_uneven_inputs([ddp_model], even_batches=False):\\n+ ... for input, output in dataloader:\\n+ ... outputs = model(input)\\n+ ... loss = loss_func(outputs)\\n+ ... loss.backward()\\n+ ... optimizer.step()\\n+ ... 
optimizer.zero_grad()\\n+ ```\\n+\\n+ \"\"\"\\n+ if is_torch_version(\"<\", \"1.10.0\"):\\n+ raise ValueError(f\"Joining uneven inputs requires PyTorch >= 1.10.0, You have {torch.__version__}.\")\\n+\\n+ if self.distributed_type == DistributedType.MULTI_GPU:\\n+ dl_even_batches_values = []\\n+\\n+ if even_batches is not None:\\n+ # override value in batch sampler for map-style datasets\\n+ for dl_idx, dl in enumerate(self._dataloaders):\\n+ if isinstance(dl, DataLoaderDispatcher):\\n+ continue',\n", - " 'from_author': True},\n", - " {'body': 'I have made the change now',\n", - " 'diff_hunk': '@@ -608,6 +612,91 @@ def accumulate(self, model):\\n with context(model):\\n yield\\n \\n+ @contextmanager\\n+ def join_uneven_inputs(self, joinables, even_batches=None):\\n+ \"\"\"\\n+ A context manager that facilitates distributed training or evaluation on uneven inputs, which acts as a wrapper\\n+ around `torch.distributed.algorithms.join`. This is useful when the total batch size does not evenly divide the\\n+ length of the dataset.\\n+\\n+ Args:\\n+ joinables (`List[torch.distributed.algorithms.Joinable]`):\\n+ A list of models or optimizers that subclass `torch.distributed.algorithms.Joinable`. Most commonly, a\\n+ PyTorch Module that was prepared with `Accelerator.prepare` for DistributedDataParallel training.\\n+ even_batches (`bool`, *optional*)\\n+ If set, this will override the value of `even_batches` set in the `Accelerator`. If it is not provided,\\n+ the default `Accelerator` value wil be used.\\n+\\n+ \\n+\\n+ `join_uneven_inputs` is only supported for Distributed Data Parallel training on multiple GPUs. For any other\\n+ configuration, this method will have no effect.\\n+\\n+ \\n+\\n+ \\n+\\n+ Overidding `even_batches` will not affect iterable-style data loaders.\\n+\\n+ \\n+\\n+ Example:\\n+\\n+ ```python\\n+ >>> from accelerate import Accelerator\\n+\\n+ >>> accelerator = Accelerator(even_batches=True)\\n+ >>> ddp_model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)\\n+\\n+ >>> with accelerator.join_uneven_inputs([ddp_model], even_batches=False):\\n+ ... for input, output in dataloader:\\n+ ... outputs = model(input)\\n+ ... loss = loss_func(outputs)\\n+ ... loss.backward()\\n+ ... optimizer.step()\\n+ ... optimizer.zero_grad()\\n+ ```\\n+\\n+ \"\"\"\\n+ if is_torch_version(\"<\", \"1.10.0\"):\\n+ raise ValueError(f\"Joining uneven inputs requires PyTorch >= 1.10.0, You have {torch.__version__}.\")\\n+\\n+ if self.distributed_type == DistributedType.MULTI_GPU:\\n+ dl_even_batches_values = []\\n+\\n+ if even_batches is not None:\\n+ # override value in batch sampler for map-style datasets\\n+ for dl_idx, dl in enumerate(self._dataloaders):\\n+ if isinstance(dl, DataLoaderDispatcher):\\n+ continue',\n", - " 'from_author': True}],\n", - " 'context': [{'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_820). All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False},\n", - " {'body': \"@sgugger @muellerzr Is this the sort of thing that you had in mind? If so, I can write some docs and perhaps a small example.\\r\\n\\r\\nOne thing that I was thinking about whilst working on this though is that, although uneven batches is likely only going to be needed during evaluation, the approach that we have taken with setting a variable at the accelerator level is that it will apply to both training and validation. 
So an example would look something like:\\r\\n\\r\\n```\\r\\naccelerator = Accelerator(even_batches=True)\\r\\n\\r\\nmodel, optimizer, train_dataloader, val_dataloader = create_components()\\r\\n\\r\\nddp_model, optimizer, train_dataloader = accelerator.prepare(model, optimizer, train_dataloader)\\r\\n\\r\\n# training loop as normal\\r\\n...\\r\\n\\r\\n\\r\\naccelerator.even_batches = False\\r\\n\\r\\nval_dataloader = accelerator.prepare(val_dataloader)\\r\\n\\r\\n# run eval loop\\r\\nwith accelerator.join_uneven_inputs([ddp_model]):\\r\\n ... \\r\\n```\\r\\nIt's a bit clunky, but as it is an advanced use case, it may be acceptable.\\r\\n\\r\\nI'm interested to hear any thoughts that you have!\",\n", - " 'from_author': True},\n", - " {'body': '@Chris-hughes10 @sgugger what do you think about something like this. Here\\'s an MVP example showing the basic behavior:\\r\\n\\r\\n```python\\r\\nfrom contextlib import contextmanager\\r\\n\\r\\nclass A:\\r\\n \"A mock version of what we would call the dataloader\"\\r\\n def __init__(self, a:int):\\r\\n self.a = a\\r\\n\\r\\nclass B:\\r\\n \"A mock version of what we would call the Accelerator\"\\r\\n @contextmanager\\r\\n def do_the_thing(self, a):\\r\\n old_a = a.a\\r\\n a.a = 2\\r\\n try:\\r\\n yield\\r\\n finally:\\r\\n a.a = old_a\\r\\n\\r\\na = A(1)\\r\\nb = B()\\r\\n\\r\\nwith b.do_the_thing(a):\\r\\n print(a.a)\\r\\nprint(a.a)\\r\\n```\\r\\nYou\\'d find that `a.a` prints 2 in the first and 1 in the second. We can apply that similarly here since we expect if the user is calling this that they will be training/evaluating/doing things on uneven inputs, leading to an API such as:\\r\\n\\r\\n```python\\r\\nwith accelerator.join_uneven_inputs([model], dataloader=eval_dataloader):\\r\\n ...\\r\\n```\\r\\nInside this ctx manager `a.a` would be `eval_dataloader.even_batches`, and if it wasn\\'t disabled originally we can print for the user that using this context manager will temporarily change this value or something. This let\\'s us get rid of the explicit of setting in the accelerator and having to prepare later, so the only code change is the context manager. \\r\\n\\r\\nLmk what you both think\\r\\n\\r\\nOtherwise I think you\\'ve done a great job with it! Super small API design nit of my own 😄 \\r\\n\\r\\n(specifically this would do `eval_dataloader.even_batches=False` during the `a.a` bits in the context manager example)\\r\\n\\r\\nIt could probably go as far as accept a tuple of models and dataloaders and Accelerate could figure out which one is which and apply them rather than having to pass in a list 🤔 Decisions decisions :) \\r\\n\\r\\nIf wanted I could do some of this in a follow up PR, as this PR is great already',\n", - " 'from_author': False},\n", - " {'body': 'Agreed, maybe remove some of the magic with an `override` arg as well (that defaults to `False`) and a very explicit docstring as middle ground. (Also would love others opinions 😄 )',\n", - " 'from_author': False},\n", - " {'body': \"Hi both, I have given this some thought and have a couple of ideas. \\r\\n\\r\\nMy first reaction was to agree with @sgugger, changing the `even_batches` value in the dataloader seems a little too magical, but it definitely makes the particular use case smoother. I think the main irk that I have with this is that it is changing a value on an unrelated object; if it was changing the value on the accelerator itself, I think it would be quite clean. 
Additionally, it seems a bit like a side effect, as it isn't really part of the same operation as the join.\\r\\n\\r\\nOne thought that I had, is that we could expose a method on the dataloader to make it easy to toggle this behaviour. Perhaps something like:\\r\\n```\\r\\naccelerator = Accelerator()\\r\\n\\r\\nmodel, optimizer, train_dataloader, val_dataloader = create_components()\\r\\n\\r\\nddp_model, optimizer, train_dataloader, val_dataloader = accelerator.prepare(model, optimizer, train_dataloader, val_dataloader)\\r\\n\\r\\n# training loop as normal\\r\\n...\\r\\n\\r\\n\\r\\nval_dataloader.ensure_even_batches(enabled=False)\\r\\n\\r\\n# run eval loop\\r\\nwith accelerator.join_uneven_inputs([ddp_model]):\\r\\n ... \\r\\n```\\r\\nThere is still an extra step, but then it is very explicit what is going on. Conversely, this does mean that the flag in the accelerator is a little redundant. Also, it probably means that, in most cases, setting this flag and the context manager will have to be used together.\\r\\n\\r\\nAs an alternative, if we would like to keep everything accelerator-centric, we could potentially have the accelerator maintain a reference to any prepared dataloaders. This way, if we change the setting on the accelerator, we could change the value on all dataloaders. This could look something like:\\r\\n\\r\\n```\\r\\naccelerator = Accelerator(even_batches=True)\\r\\n\\r\\nmodel, optimizer, train_dataloader, val_dataloader = create_components()\\r\\n\\r\\nddp_model, optimizer, train_dataloader, val_dataloader = accelerator.prepare(model, optimizer, train_dataloader, val_dataloader)\\r\\n\\r\\n# training loop as normal\\r\\n...\\r\\n\\r\\n# run eval loop\\r\\nwith accelerator.join_uneven_inputs([ddp_model], even_batches=False):\\r\\n ... \\r\\n```\\r\\nPersonally, I think that this is quite clean, but it does mean adding additional logic to the accelerator, and perhaps the actual difference between this solution and the one proposed by @muellerzr is a bit academic.\\r\\n\\r\\nEDIT: cc @pacman100 , I think we commented essentially at the same time. What do you think about the second approach proposed here?\",\n", - " 'from_author': True},\n", - " {'body': 'Yes, I like the second approach proposed above ✨',\n", - " 'from_author': False},\n", - " {'body': \"@Chris-hughes10 I too like that solution, as it keeps in line with how we design bits like this such as the gradient accumulation wrapper 😄 It also keeps the dataloader preparation the same as optimizer and model in the sense of pointers. This just means that there would also need to be a change to `accelerator.free_memory()` to also free the dataloader references. \\r\\n\\r\\nI also think it should just be an `Accelerator` only attribute, there wouldn't be a need to put this in `AcceleratorState` since we deem that too magical already 😃 \",\n", - " 'from_author': False},\n", - " {'body': 'Works for me too!', 'from_author': False},\n", - " {'body': 'It looks like there are failing tests that are unrelated to this PR. Perhaps https://github.com/huggingface/accelerate/pull/825 will solve the problem.',\n", - " 'from_author': True},\n", - " {'body': '@Chris-hughes10 correct, you can ignore the \"checkpoint\" and \"rest\" failures here. 
',\n", - " 'from_author': False},\n", - " {'body': \"I have made some more updates, so I think it should be feature complete now.\\r\\n\\r\\nUnfortunately, I am getting a couple of failing tests on the CI, which look like they occur in the bf16 section of the test script, comparing training on CPU and GPU. I am unable to reproduce locally as I don't have a device with bf16 available, and the rest of the tests in that script seem to pass. \\r\\n\\r\\nIt is quite strange though, because looking at what it is failing, it doesn't really interact with anything in this PR other than the accelerator maintaining references to the dataloader; which doesn't seem to be a problem for the other training configurations in the script. After noticing that the accelerator state is cleared multiple times in the test - which does not include the dataloaders, as discussed above - I also tried adding resetting the accelerator manually before the bf16 section, using `accelerator.free_memory`, but this did not make any difference. \\r\\n\\r\\nHonesty, I am stumped at this point. Do you guys have any ideas on what could be the issue?\",\n", - " 'from_author': True},\n", - " {'body': \"@Chris-hughes10 I'll be able to dig into this further tomorrow and see if I can't root out what's going on, so don't stress on it today 😃 \",\n", - " 'from_author': False},\n", - " {'body': 'Hello @Chris-hughes10 , I went over the PR and tests. I found the cause of the tests failing. In the below screenshot please find the changes required in `test_accelerator.py` to resolve failing tests.\\r\\n\\r\\n\"Screenshot\\r\\n',\n", - " 'from_author': False},\n", - " {'body': \"> Hello @Chris-hughes10 , I went over the PR and tests. I found the cause of the tests failing. In the below screenshot please find the changes required in `test_accelerator.py` to resolve failing tests.\\r\\n> \\r\\n\\r\\n\\r\\nAwesome, thanks for this! I didn't anticipate that the state would carry over outside of each test, but I suppose it makes sense for a singleton. I will make the change.\",\n", - " 'from_author': True},\n", - " {'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_820). All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False},\n", - " {'body': 'There seems to be a problem with building the PR documentation, is this something that I can fix?',\n", - " 'from_author': True},\n", - " {'body': \"@Chris-hughes10 nope don't worry about that, it's a problem on our side :)\",\n", - " 'from_author': False},\n", - " {'body': \"Thanks a bunch @Chris-hughes10, excellent work! I'll be keeping an eye on our CI that runs multi-gpu tests post this merge to see if anything is up and I'll let you know if there is and I can't seem to figure out what may be failing quickly :) \",\n", - " 'from_author': False},\n", - " {'body': 'Thanks for helping to get this merged @muellerzr @sgugger @pacman100! ',\n", - " 'from_author': True},\n", - " {'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_820). 
All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/820'},\n", - " 1109839693: {'diff': 'diff --git a/docs/source/usage_guides/megatron_lm.mdx b/docs/source/usage_guides/megatron_lm.mdx\\nindex d26ecd4b5..23b024f75 100644\\n--- a/docs/source/usage_guides/megatron_lm.mdx\\n+++ b/docs/source/usage_guides/megatron_lm.mdx\\n@@ -435,6 +435,103 @@ python checkpoint_utils/megatgron_gpt2/checkpoint_reshaping_and_interoperability\\n --print-checkpoint-structure\\n ```\\n \\n+## Megatron-LM GPT models support returning logits and `megatron_generate` function for text generation\\n+\\n+1. Returning logits require setting `require_logits=True` in MegatronLMPlugin as shown below. \\n+These would be available on the in the last stage of pipeline.\\n+```python\\n+megatron_lm_plugin = MegatronLMPlugin(return_logits=True)\\n+```\\n+\\n+2. `megatron_generate` method for Megatron-LM GPT model: This will use Tensor and Pipeline Parallelism to complete \\n+generations for a batch of inputs when using greedy with/without top_k/top_p sampling and for individual prompt inputs when using beam search decoding. \\n+Only a subset of features of transformers generate is supported. This will help in using large models via tensor and pipeline parallelism \\n+for generation (already does key-value caching and uses fused kernels by default).\\n+This requires data parallel size to be 1, sequence parallelism and activation checkpointing to be disabled.\\n+It also requires specifying path to tokenizer\\'s vocab file and merges file. \\n+Below example shows how to configure and use `megatron_generate` method for Megatron-LM GPT model.\\n+```python\\n+# specifying tokenizer\\'s vocab and merges file\\n+vocab_file = os.path.join(args.resume_from_checkpoint, \"vocab.json\")\\n+merge_file = os.path.join(args.resume_from_checkpoint, \"merges.txt\")\\n+other_megatron_args = {\"vocab_file\": vocab_file, \"merge_file\": merge_file}\\n+megatron_lm_plugin = MegatronLMPlugin(other_megatron_args=other_megatron_args)\\n+\\n+# inference using `megatron_generate` functionality\\n+tokenizer.pad_token = tokenizer.eos_token\\n+max_new_tokens = 64\\n+batch_texts = [\\n+ \"Are you human?\",\\n+ \"The purpose of life is\",\\n+ \"The arsenal was constructed at the request of\",\\n+ \"How are you doing these days?\",\\n+]\\n+batch_encodings = tokenizer(batch_texts, return_tensors=\"pt\", padding=True)\\n+\\n+# top-p sampling\\n+generated_tokens = model.megatron_generate(\\n+ batch_encodings[\"input_ids\"],\\n+ batch_encodings[\"attention_mask\"],\\n+ max_new_tokens=max_new_tokens,\\n+ top_p=0.8,\\n+ top_p_decay=0.5,\\n+ temperature=0.9,\\n+)\\n+decoded_preds = tokenizer.batch_decode(generated_tokens.cpu().numpy())\\n+accelerator.print(decoded_preds)\\n+\\n+# top-k sampling\\n+generated_tokens = model.megatron_generate(\\n+ batch_encodings[\"input_ids\"],\\n+ batch_encodings[\"attention_mask\"],\\n+ max_new_tokens=max_new_tokens,\\n+ top_k=50,\\n+ temperature=0.9,\\n+)\\n+decoded_preds = tokenizer.batch_decode(generated_tokens.cpu().numpy())\\n+accelerator.print(decoded_preds)\\n+\\n+# adding `bos` token at the start\\n+generated_tokens = model.megatron_generate(\\n+ batch_encodings[\"input_ids\"], batch_encodings[\"attention_mask\"], max_new_tokens=max_new_tokens, add_BOS=True\\n+)\\n+decoded_preds = tokenizer.batch_decode(generated_tokens.cpu().numpy())\\n+accelerator.print(decoded_preds)\\n+\\n+# beam search => only takes 
single prompt\\n+batch_texts = [\"The purpose of life is\"]\\n+batch_encodings = tokenizer(batch_texts, return_tensors=\"pt\", padding=True)\\n+generated_tokens = model.megatron_generate(\\n+ batch_encodings[\"input_ids\"],\\n+ batch_encodings[\"attention_mask\"],\\n+ max_new_tokens=max_new_tokens,\\n+ num_beams=20,\\n+ length_penalty=1.5,\\n+)\\n+decoded_preds = tokenizer.batch_decode(generated_tokens.cpu().numpy())\\n+accelerator.print(decoded_preds)\\n+```\\n+\\n+3. An end-to-end example of using `megatron_generate` method for Megatron-LM GPT model is available at\\n+[megatron_gpt2_generation.py](https://github.com/pacman100/accelerate-megatron-test/blob/main/src/inference/megatron_gpt2_generation.py) with \\n+config file [megatron_lm_gpt_generate_config.yaml](https://github.com/pacman100/accelerate-megatron-test/blob/main/src/Configs/megatron_lm_gpt_generate_config.yaml).\\n+The bash script with accelerate launch command is available at [megatron_lm_gpt_generate.sh](https://github.com/pacman100/accelerate-megatron-test/blob/main/megatron_lm_gpt_generate.sh).\\n+The output logs of the script are available at [megatron_lm_gpt_generate.log](https://github.com/pacman100/accelerate-megatron-test/blob/main/output_logs/megatron_lm_gpt_generate.log).\\n+\\n+## Support for ROPE and ALiBi Positional embeddings and Multi-Query Attention\\n+\\n+1. For ROPE/ALiBi attention, pass `position_embedding_type` with `(\"absolute\" | \"rotary\" | \"alibi\")` to `MegatronLMPlugin` as shown below.\\n+```python\\n+other_megatron_args = {\"position_embedding_type\": \"alibi\"}\\n+megatron_lm_plugin = MegatronLMPlugin(other_megatron_args=other_megatron_args)\\n+```\\n+\\n+2. For Multi-Query Attention, pass `attention_head_type` with `(\"multihead\" | \"multiquery\")` to `MegatronLMPlugin` as shown below.\\n+```python\\n+other_megatron_args = {\"attention_head_type\": \"multiquery\"}\\n+megatron_lm_plugin = MegatronLMPlugin(other_megatron_args=other_megatron_args)\\n+```\\n+\\n ## Caveats\\n \\n 1. Supports Transformers GPT2, Megatron-BERT and T5 models.\\n@@ -445,8 +542,12 @@ there is quite complex interplay of pipeline, tensor and data parallelsim behind\\n The `model(**batch_data)` call return loss(es) averaged across the data parallel ranks.\\n This is fine for most cases wherein pre-training jobs are run using Megatron-LM features and\\n you can easily compute the `perplexity` using the loss. \\n+For GPT model, returning logits in addition to loss(es) is supported. \\n+These logits aren\\'t gathered across data prallel ranks. Use `accelerator.utils.gather_across_data_parallel_groups`\\n+to gather logits across data parallel ranks. These logits along with labels can be used for computing various \\n+performance metrics. \\n \\n-3. The main process is the last rank as the losses are available in the last stage of pipeline.\\n+3. 
The main process is the last rank as the losses/logits are available in the last stage of pipeline.\\n `accelerator.is_main_process` and `accelerator.is_local_main_process` return `True` for last rank when using \\n Megatron-LM integration.\\n \\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\\nindex b472ec556..0e67daf4b 100644\\n--- a/src/accelerate/utils/__init__.py\\n+++ b/src/accelerate/utils/__init__.py\\n@@ -107,6 +107,7 @@\\n MegatronLMSchedulerWrapper,\\n T5TrainStep,\\n avg_losses_across_data_parallel_group,\\n+ gather_across_data_parallel_groups,\\n )\\n from .megatron_lm import initialize as megatron_lm_initialize\\n from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader\\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\\nindex 8227fa5bc..5fcb5e68f 100644\\n--- a/src/accelerate/utils/dataclasses.py\\n+++ b/src/accelerate/utils/dataclasses.py\\n@@ -771,6 +771,18 @@ class MegatronLMPlugin:\\n default=False,\\n metadata={\"help\": \"Whether to set all logging options.\"},\\n )\\n+ eval_iters: int = field(\\n+ default=100, metadata={\"help\": \"Number of iterations to run for evaluation validation/test for.\"}\\n+ )\\n+ eval_interval: int = field(\\n+ default=1000, metadata={\"help\": \"Interval between running evaluation on validation set.\"}\\n+ )\\n+ return_logits: bool = field(\\n+ default=False,\\n+ metadata={\"help\": \"Whether to return logits from the model.\"},\\n+ )\\n+\\n+ # custom train step args\\n custom_train_step_class: Optional[Any] = field(\\n default=None,\\n metadata={\"help\": \"Custom train step class.\"},\\n@@ -779,11 +791,22 @@ class MegatronLMPlugin:\\n default=None,\\n metadata={\"help\": \"Custom train step kwargs.\"},\\n )\\n- eval_iters: int = field(\\n- default=100, metadata={\"help\": \"Number of iterations to run for evaluation validation/test for.\"}\\n+\\n+ # custom model args\\n+ custom_model_provider_function: Optional[Callable] = field(\\n+ default=None,\\n+ metadata={\"help\": \"Custom model provider function.\"},\\n )\\n- eval_interval: int = field(\\n- default=1000, metadata={\"help\": \"Interval between running evaluation on validation set.\"}\\n+ custom_prepare_model_function: Optional[Callable] = field(\\n+ default=None,\\n+ metadata={\"help\": \"Custom prepare model function.\"},\\n+ )\\n+\\n+ # remaining args such as enabling Alibi/ROPE positional embeddings,\\n+ # wandb logging, Multi-Query Attention, etc.\\n+ other_megatron_args: Optional[Dict[str, Any]] = field(\\n+ default=None,\\n+ metadata={\"help\": \"Other Megatron-LM arguments. 
Please refer Megatron-LM\"},\\n )\\n \\n def __post_init__(self):\\n@@ -840,6 +863,8 @@ def __post_init__(self):\\n self.megatron_lm_default_args[\"tensorboard_dir\"] = self.tensorboard_dir\\n if self.set_all_logging_options:\\n self.set_tensorboard_logging_options()\\n+ if self.other_megatron_args is not None:\\n+ self.megatron_lm_default_args.update(self.other_megatron_args)\\n \\n def set_network_size_args(self, model, batch_data=None):\\n # Check if the model is either BERT, GPT or T5 else raise error\\n@@ -884,6 +909,8 @@ def set_network_size_args(self, model, batch_data=None):\\n else:\\n self.seq_length = max_position_embeddings\\n self.megatron_lm_default_args[\"seq_length\"] = self.seq_length\\n+ self.megatron_lm_default_args[\"return_logits\"] = self.return_logits\\n+ self.megatron_lm_default_args[\"tokenizer_type\"] = \"GPT2BPETokenizer\"\\n elif \"t5\" in model.config.model_type.lower():\\n model_type_name = \"t5\"\\n num_layers = model.config.num_layers\\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\\nindex bdb53988a..85e20a746 100644\\n--- a/src/accelerate/utils/imports.py\\n+++ b/src/accelerate/utils/imports.py\\n@@ -13,7 +13,9 @@\\n # limitations under the License.\\n \\n import importlib\\n+import os\\n import sys\\n+from distutils.util import strtobool\\n from functools import lru_cache\\n \\n import torch\\n@@ -90,10 +92,11 @@ def is_bf16_available(ignore_tpu=False):\\n \\n \\n def is_megatron_lm_available():\\n- package_exists = importlib.util.find_spec(\"megatron\") is not None\\n- if package_exists:\\n- megatron_version = parse(importlib_metadata.version(\"megatron-lm\"))\\n- return compare_versions(megatron_version, \">=\", \"2.2.0\")\\n+ if strtobool(os.environ.get(\"USE_MEGATRON_LM\", \"False\")) == 1:\\n+ package_exists = importlib.util.find_spec(\"megatron\") is not None\\n+ if package_exists:\\n+ megatron_version = parse(importlib_metadata.version(\"megatron-lm\"))\\n+ return compare_versions(megatron_version, \">=\", \"2.2.0\")\\n return False\\n \\n \\ndiff --git a/src/accelerate/utils/megatron_lm.py b/src/accelerate/utils/megatron_lm.py\\nindex cb988e360..afee2fcca 100644\\n--- a/src/accelerate/utils/megatron_lm.py\\n+++ b/src/accelerate/utils/megatron_lm.py\\n@@ -20,11 +20,12 @@\\n import torch\\n import torch.nn.functional as F\\n from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\\n+from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP\\n \\n from ..optimizer import AcceleratedOptimizer\\n from ..scheduler import AcceleratedScheduler\\n from .imports import is_megatron_lm_available, is_transformers_available\\n-from .operations import send_to_device\\n+from .operations import recursively_apply, send_to_device\\n \\n \\n if is_transformers_available():\\n@@ -41,6 +42,7 @@\\n get_num_microbatches,\\n get_tensorboard_writer,\\n get_timers,\\n+ get_tokenizer,\\n mpu,\\n print_rank_0,\\n print_rank_last,\\n@@ -56,16 +58,24 @@\\n set_jit_fusion_options,\\n write_args_to_tensorboard,\\n )\\n- from megatron.model import BertModel, GPTModel, ModelType, T5Model\\n+ from megatron.model import BertModel\\n+ from megatron.model import DistributedDataParallel as LocalDDP\\n+ from megatron.model import Float16Module, GPTModel, ModelType, T5Model\\n from megatron.model.classification import Classification\\n from megatron.optimizer import get_megatron_optimizer\\n from megatron.schedules import get_forward_backward_func\\n+ from megatron.text_generation.communication import broadcast_int_list, 
broadcast_tensor\\n+ from megatron.text_generation.generation import (\\n+ beam_search_and_return_on_first_stage,\\n+ generate_tokens_probs_and_return_on_first_stage,\\n+ )\\n from megatron.tokenizer.tokenizer import _vocab_size_with_padding\\n from megatron.training import get_model, get_optimizer_param_scheduler, training_log\\n from megatron.utils import (\\n average_losses_across_data_parallel_group,\\n calc_params_l2_norm,\\n get_ltor_masks_and_position_ids,\\n+ unwrap_model,\\n )\\n \\n \\n@@ -114,13 +124,21 @@ def model_provider_func(pre_process=True, post_process=True, add_encoder=True, a\\n def prepare_model(accelerator):\\n accelerator.print(\"Preparing model\")\\n args = get_args()\\n- if args.model_type_name == \"bert\" or args.model_type_name == \"gpt\":\\n- model_type = ModelType.encoder_or_decoder\\n- elif args.model_type_name == \"t5\":\\n- model_type = ModelType.encoder_and_decoder\\n- if args.pipeline_model_parallel_split_rank is None and args.pipeline_model_parallel_size > 1:\\n- args.pipeline_model_parallel_split_rank = args.pipeline_model_parallel_size // 2\\n- model = get_model(model_provider_func, model_type)\\n+ if accelerator.state.megatron_lm_plugin.custom_prepare_model_function is not None:\\n+ if accelerator.state.megatron_lm_plugin.custom_model_provider_function is None:\\n+ raise ValueError(\\n+ \"You must provide a `custom_model_provider_function` when using a `custom_prepare_model_function`.\"\\n+ )\\n+ custom_model_provider_func = accelerator.state.megatron_lm_plugin.custom_model_provider_function\\n+ model = accelerator.state.megatron_lm_plugin.custom_prepare_model_function(custom_model_provider_func)\\n+ else:\\n+ if args.model_type_name == \"bert\" or args.model_type_name == \"gpt\":\\n+ model_type = ModelType.encoder_or_decoder\\n+ elif args.model_type_name == \"t5\":\\n+ model_type = ModelType.encoder_and_decoder\\n+ if args.pipeline_model_parallel_split_rank is None and args.pipeline_model_parallel_size > 1:\\n+ args.pipeline_model_parallel_split_rank = args.pipeline_model_parallel_size // 2\\n+ model = get_model(model_provider_func, model_type)\\n return model\\n \\n \\n@@ -593,6 +611,12 @@ def __init__(self, args):\\n self.loss_func = self.get_loss_func()\\n self.forward_step = self.get_forward_step_func()\\n self.eod_token = args.padded_vocab_size - 1\\n+ if args.vocab_file is not None:\\n+ tokenizer = get_tokenizer()\\n+ self.eod_token = tokenizer.eod\\n+ self.reset_position_ids = args.reset_position_ids\\n+ self.reset_attention_mask = args.reset_attention_mask\\n+ self.eod_mask_loss = args.eod_mask_loss\\n if not args.model_return_dict:\\n self.model_output_class = None\\n else:\\n@@ -601,8 +625,6 @@ def __init__(self, args):\\n def get_batch_func(self, megatron_dataset_flag):\\n def get_batch_megatron(data_iterator):\\n \"\"\"Generate a batch\"\"\"\\n- args = get_args()\\n-\\n # Items and their type.\\n keys = [\"text\"]\\n datatype = torch.int64\\n@@ -621,7 +643,7 @@ def get_batch_megatron(data_iterator):\\n \\n # Get the masks and postition ids.\\n attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(\\n- tokens, self.eod_token, args.reset_position_ids, args.reset_attention_mask, args.eod_mask_loss\\n+ tokens, self.eod_token, self.reset_position_ids, self.reset_attention_mask, self.eod_mask_loss\\n )\\n \\n return tokens, labels, loss_mask, attention_mask, position_ids\\n@@ -638,7 +660,7 @@ def get_batch_transformer(data_iterator):\\n tokens = tokens_[:, :-1].contiguous()\\n # Get the masks and postition ids.\\n 
attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(\\n- tokens, self.eod_token, False, False, True\\n+ tokens, self.eod_token, self.reset_position_ids, self.reset_attention_mask, True\\n )\\n return tokens, labels, loss_mask, attention_mask, position_ids\\n \\n@@ -648,15 +670,24 @@ def get_batch_transformer(data_iterator):\\n return get_batch_transformer\\n \\n def get_loss_func(self):\\n+ args = get_args()\\n+\\n def loss_func(loss_mask, output_tensor):\\n- losses = output_tensor.float()\\n+ if args.return_logits:\\n+ losses, logits = output_tensor\\n+ else:\\n+ losses = output_tensor\\n+ losses = losses.float()\\n loss_mask = loss_mask.view(-1).float()\\n loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()\\n \\n # Reduce loss for logging.\\n averaged_loss = average_losses_across_data_parallel_group([loss])\\n \\n- return loss, {\"lm loss\": averaged_loss[0]}\\n+ output_dict = {\"lm loss\": averaged_loss[0]}\\n+ if args.return_logits:\\n+ output_dict.update({\"logits\": logits})\\n+ return loss, output_dict\\n \\n return loss_func\\n \\n@@ -1031,7 +1062,10 @@ def train_step(self, **batch_data):\\n loss_reduced = {}\\n for key in losses_reduced[0]:\\n losses_reduced_for_key = [x[key] for x in losses_reduced]\\n- loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key)\\n+ if len(losses_reduced_for_key[0].shape) == 0:\\n+ loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key)\\n+ else:\\n+ loss_reduced[key] = torch.concat(losses_reduced_for_key)\\n return loss_reduced, skipped_iter, grad_norm, num_zeros_in_grad\\n return {}, skipped_iter, grad_norm, num_zeros_in_grad\\n \\n@@ -1079,7 +1113,10 @@ def eval_step(self, **batch_data):\\n loss_reduced = {}\\n for key in loss_dicts[0]:\\n losses_reduced_for_key = [x[key] for x in loss_dicts]\\n- loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key)\\n+ if len(losses_reduced_for_key[0].shape) == 0:\\n+ loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key)\\n+ else:\\n+ loss_reduced[key] = torch.concat(losses_reduced_for_key)\\n return loss_reduced\\n else:\\n return {}\\n@@ -1134,10 +1171,15 @@ def forward(self, **batch_data):\\n \\n loss = torch.tensor(0.0, device=args.local_rank)\\n for key in loss_dict:\\n- loss += loss_dict[key]\\n+ if len(loss_dict[key].shape) == 0:\\n+ loss += loss_dict[key]\\n+\\n+ logits = None\\n+ if \"logits\" in loss_dict:\\n+ logits = loss_dict[\"logits\"]\\n # loss = reduce(loss)\\n if self.train_step_handler.model_output_class is not None:\\n- return self.train_step_handler.model_output_class(loss=loss)\\n+ return self.train_step_handler.model_output_class(loss=loss, logits=logits)\\n return loss\\n \\n def log_eval_results(self):\\n@@ -1186,6 +1228,184 @@ def load_checkpoint(self, input_dir):\\n if args.fp16 and self.iteration == 0:\\n self.optimizer.reload_model_params()\\n \\n+ def megatron_generate(\\n+ self,\\n+ inputs,\\n+ attention_mask=None,\\n+ max_length=None,\\n+ max_new_tokens=None,\\n+ num_beams=None,\\n+ temperature=None,\\n+ top_k=None,\\n+ top_p=None,\\n+ length_penalty=None,\\n+ **kwargs,\\n+ ):\\n+ \"\"\"\\n+ Generate method for GPT2 model. This method is used for inference. Supports both greedy and beam search along\\n+ with sampling. Refer the Megatron-LM repo for more details\\n+\\n+ Args:\\n+ inputs (torch.Tensor): input ids\\n+ attention_mask (torch.Tensor, optional): attention mask. Defaults to None.\\n+ max_length (int, optional): max length of the generated sequence. 
Defaults to None.\\n+ Either this or max_new_tokens should be provided.\\n+ max_new_tokens (int, optional): max number of tokens to be generated. Defaults to None.\\n+ Either this or max_length should be provided.\\n+ num_beams (int, optional): number of beams to use for beam search. Defaults to None.\\n+ temperature (float, optional): temperature for sampling. Defaults to 1.0.\\n+ top_k (int, optional): top k tokens to consider for sampling. Defaults to 0.0.\\n+ top_p (float, optional): tokens in top p probability are considered for sampling. Defaults to 0.0.\\n+ length_penalty (float, optional): length penalty for beam search. Defaults to None.\\n+ kwargs: additional key-value arguments\\n+ \"\"\"\\n+\\n+ # checking if required arguments are passed\\n+ args = get_args()\\n+ if args.model_type_name != \"gpt\":\\n+ raise NotImplementedError(\"Generate method is not implemented for this model\")\\n+\\n+ if args.data_parallel_size > 1:\\n+ raise ValueError(\"Generate method requires data parallelism to be 1\")\\n+\\n+ if args.sequence_parallel:\\n+ raise ValueError(\"Generate method requires sequence parallelism to be False\")\\n+\\n+ if args.recompute_granularity is not None:\\n+ raise ValueError(\"Checkpoint activations cannot be set for inference\")\\n+\\n+ if args.vocab_file is None:\\n+ raise ValueError(\"Vocab file is required for inference\")\\n+\\n+ # Prepare inputs\\n+ if max_length is None and max_new_tokens is None:\\n+ raise ValueError(\"`max_length` or `max_new_tokens` are required for inference\")\\n+\\n+ if temperature is None:\\n+ temperature = 1.0\\n+ elif not (0.0 < temperature <= 100.0):\\n+ raise ValueError(\"temperature must be a positive number less than or equal to 100.0\")\\n+\\n+ if top_k is None:\\n+ top_k = 0\\n+ elif not (0 <= top_k <= 1000):\\n+ raise ValueError(\"top_k must be a positive number less than or equal to 1000\")\\n+\\n+ if top_p is None:\\n+ top_p = 0.0\\n+ elif top_p > 0.0 and top_k > 0.0:\\n+ raise ValueError(\"top_p and top_k sampling cannot be set together\")\\n+ else:\\n+ if not (0.0 <= top_p <= 1.0):\\n+ raise ValueError(\"top_p must be less than or equal to 1.0\")\\n+\\n+ top_p_decay = kwargs.get(\"top_p_decay\", 0.0)\\n+ if not (0.0 <= top_p_decay <= 1.0):\\n+ raise ValueError(\"top_p_decay must be less than or equal to 1.0\")\\n+\\n+ top_p_bound = kwargs.get(\"top_p_bound\", 0.0)\\n+ if not (0.0 <= top_p_bound <= 1.0):\\n+ raise ValueError(\"top_p_bound must be less than or equal to 1.0\")\\n+\\n+ add_BOS = kwargs.get(\"add_BOS\", False)\\n+ if not (isinstance(add_BOS, bool)):\\n+ raise ValueError(\"add_BOS must be a boolean\")\\n+\\n+ beam_width = num_beams\\n+ if beam_width is not None:\\n+ if not isinstance(beam_width, int):\\n+ raise ValueError(\"beam_width must be an integer\")\\n+ if beam_width < 1:\\n+ raise ValueError(\"beam_width must be greater than 0\")\\n+ if inputs.shape[0] > 1:\\n+ return \"When doing beam_search, batch size must be 1\"\\n+\\n+ tokenizer = get_tokenizer()\\n+\\n+ stop_token = kwargs.get(\"stop_token\", tokenizer.eod)\\n+ if stop_token is not None:\\n+ if not isinstance(stop_token, int):\\n+ raise ValueError(\"stop_token must be an integer\")\\n+\\n+ if length_penalty is None:\\n+ length_penalty = 1.0\\n+\\n+ sizes_list = None\\n+ prompts_tokens_tensor = None\\n+ prompts_length_tensor = None\\n+ if torch.distributed.get_rank() == 0:\\n+ # Get the prompts length.\\n+ if attention_mask is None:\\n+ prompts_length_tensor = torch.cuda.LongTensor([inputs.shape[1]] * inputs.shape[0])\\n+ else:\\n+ 
prompts_length_tensor = attention_mask.sum(axis=-1).cuda()\\n+\\n+ if max_new_tokens is None:\\n+ max_new_tokens = max_length - inputs.shape[1]\\n+ if max_new_tokens <= 0:\\n+ raise ValueError(\"max_new_tokens must be greater than 0\")\\n+\\n+ if add_BOS:\\n+ max_length = max_new_tokens + inputs.shape[1] + 1\\n+ # making sure that `max_length` is a multiple of 4 to leverage fused kernels\\n+ max_length = 4 * math.ceil(max_length / 4)\\n+ max_new_tokens = max_length - (inputs.shape[1] + 1)\\n+ padding = torch.cuda.LongTensor([[tokenizer.eod] * max_new_tokens] * inputs.shape[0])\\n+ prompts_tokens_tensor = torch.concat(\\n+ [torch.unsqueeze(padding[:, 0], axis=-1), inputs.cuda(), padding], axis=-1\\n+ )\\n+ else:\\n+ # making sure that `max_length` is a multiple of 4 to leverage fused kernels\\n+ max_length = max_new_tokens + inputs.shape[1]\\n+ max_length = 4 * math.ceil(max_length / 4)\\n+ max_new_tokens = max_length - inputs.shape[1]\\n+ padding = torch.cuda.LongTensor([[tokenizer.eod] * max_new_tokens] * inputs.shape[0])\\n+ prompts_tokens_tensor = torch.concat([inputs.cuda(), padding], axis=-1)\\n+\\n+ # We need the sizes of these tensors for the boradcast\\n+ sizes_list = [\\n+ prompts_tokens_tensor.size(0), # Batch size\\n+ prompts_tokens_tensor.size(1),\\n+ ] # Sequence lenght\\n+\\n+ # First, broadcast the sizes.\\n+ sizes_tensor = broadcast_int_list(2, int_list=sizes_list, rank=0)\\n+\\n+ # Now that we have the sizes, we can boradcast the tokens\\n+ # and length tensors.\\n+ sizes = sizes_tensor.tolist()\\n+ context_tokens_tensor = broadcast_tensor(sizes, torch.int64, tensor=prompts_tokens_tensor, rank=0)\\n+ context_length_tensor = broadcast_tensor(sizes[0], torch.int64, tensor=prompts_length_tensor, rank=0)\\n+\\n+ # Run the inference\\n+ random_seed = kwargs.get(\"random_seed\", 0)\\n+ torch.random.manual_seed(random_seed)\\n+ unwrapped_model = unwrap_model(self.base_model, (torchDDP, LocalDDP, Float16Module))\\n+ if beam_width is not None:\\n+ tokens, _ = beam_search_and_return_on_first_stage(\\n+ unwrapped_model,\\n+ context_tokens_tensor,\\n+ context_length_tensor,\\n+ beam_width,\\n+ stop_token=stop_token,\\n+ num_return_gen=1,\\n+ length_penalty=length_penalty,\\n+ )\\n+ else:\\n+ tokens, _, _ = generate_tokens_probs_and_return_on_first_stage(\\n+ unwrapped_model,\\n+ context_tokens_tensor,\\n+ context_length_tensor,\\n+ return_output_log_probs=False,\\n+ top_k=top_k,\\n+ top_p=top_p,\\n+ top_p_decay=top_p_decay,\\n+ top_p_bound=top_p_bound,\\n+ temperature=temperature,\\n+ use_eod_token_for_early_termination=True,\\n+ )\\n+ return tokens\\n+\\n \\n # other utilities\\n def avg_losses_across_data_parallel_group(losses):\\n@@ -1197,3 +1417,25 @@ def avg_losses_across_data_parallel_group(losses):\\n \"\"\"\\n \\n return average_losses_across_data_parallel_group(losses)\\n+\\n+\\n+def gather_across_data_parallel_groups(tensor):\\n+ \"\"\"\\n+ Recursively gather tensor in a nested list/tuple/dictionary of tensors from data parallel ranks.\\n+\\n+ Args:\\n+ tensor (nested list/tuple/dictionary of `torch.Tensor`):\\n+ The data to gather across data parallel ranks.\\n+\\n+ \"\"\"\\n+\\n+ def _gpu_gather_one(tensor):\\n+ if tensor.ndim == 0:\\n+ tensor = tensor.clone()[None]\\n+ output_tensors = [\\n+ tensor.clone() for _ in range(torch.distributed.get_world_size(group=mpu.get_data_parallel_group()))\\n+ ]\\n+ torch.distributed.all_gather(output_tensors, tensor, group=mpu.get_data_parallel_group())\\n+ return torch.cat(output_tensors, dim=0)\\n+\\n+ return 
recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True)\\n',\n", - " 'code_comments': [{'body': 'Here you should lowercase, or use the str2bool conversion, just to make sure to catch variants.',\n", - " 'diff_hunk': '@@ -90,10 +91,11 @@ def is_bf16_available(ignore_tpu=False):\\n \\n \\n def is_megatron_lm_available():\\n- package_exists = importlib.util.find_spec(\"megatron\") is not None\\n- if package_exists:\\n- megatron_version = parse(importlib_metadata.version(\"megatron-lm\"))\\n- return compare_versions(megatron_version, \">=\", \"2.2.0\")\\n+ if os.environ.get(\"USE_MEGATRON_LM\", \"false\") == \"true\":',\n", - " 'from_author': False},\n", - " {'body': 'Fits on one line.',\n", - " 'diff_hunk': '@@ -1134,10 +1171,18 @@ def forward(self, **batch_data):\\n \\n loss = torch.tensor(0.0, device=args.local_rank)\\n for key in loss_dict:\\n- loss += loss_dict[key]\\n+ if len(loss_dict[key].shape) == 0:\\n+ loss += loss_dict[key]\\n+\\n+ logits = None\\n+ if \"logits\" in loss_dict:\\n+ logits = loss_dict[\"logits\"]\\n # loss = reduce(loss)\\n if self.train_step_handler.model_output_class is not None:\\n- return self.train_step_handler.model_output_class(loss=loss)\\n+ return self.train_step_handler.model_output_class(\\n+ loss=loss,\\n+ logits=logits,\\n+ )',\n", - " 'from_author': False},\n", - " {'body': 'Very reluctant to add this here with the same name as in Transformers. This method does not have the same signature/behavior so users will be very confused to get something different. ',\n", - " 'diff_hunk': '@@ -1186,6 +1231,184 @@ def load_checkpoint(self, input_dir):\\n if args.fp16 and self.iteration == 0:\\n self.optimizer.reload_model_params()\\n \\n+ def generate(',\n", - " 'from_author': False},\n", - " {'body': 'Done.',\n", - " 'diff_hunk': '@@ -90,10 +91,11 @@ def is_bf16_available(ignore_tpu=False):\\n \\n \\n def is_megatron_lm_available():\\n- package_exists = importlib.util.find_spec(\"megatron\") is not None\\n- if package_exists:\\n- megatron_version = parse(importlib_metadata.version(\"megatron-lm\"))\\n- return compare_versions(megatron_version, \">=\", \"2.2.0\")\\n+ if os.environ.get(\"USE_MEGATRON_LM\", \"false\") == \"true\":',\n", - " 'from_author': True},\n", - " {'body': 'Done.',\n", - " 'diff_hunk': '@@ -1134,10 +1171,18 @@ def forward(self, **batch_data):\\n \\n loss = torch.tensor(0.0, device=args.local_rank)\\n for key in loss_dict:\\n- loss += loss_dict[key]\\n+ if len(loss_dict[key].shape) == 0:\\n+ loss += loss_dict[key]\\n+\\n+ logits = None\\n+ if \"logits\" in loss_dict:\\n+ logits = loss_dict[\"logits\"]\\n # loss = reduce(loss)\\n if self.train_step_handler.model_output_class is not None:\\n- return self.train_step_handler.model_output_class(loss=loss)\\n+ return self.train_step_handler.model_output_class(\\n+ loss=loss,\\n+ logits=logits,\\n+ )',\n", - " 'from_author': True},\n", - " {'body': 'renamed to `megatron_generate`',\n", - " 'diff_hunk': '@@ -1186,6 +1231,184 @@ def load_checkpoint(self, input_dir):\\n if args.fp16 and self.iteration == 0:\\n self.optimizer.reload_model_params()\\n \\n+ def generate(',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/819'},\n", - " 1109536915: {'diff': 'diff --git a/docs/source/package_reference/cli.mdx b/docs/source/package_reference/cli.mdx\\nindex f7424d656..9643d3b4f 100644\\n--- 
a/docs/source/package_reference/cli.mdx\\n+++ b/docs/source/package_reference/cli.mdx\\n@@ -177,6 +177,37 @@ The following arguments are only useful when training in SageMaker\\n * `--aws_access_key_id AWS_ACCESS_KEY_ID` (`str`) -- The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job\\n * `--aws_secret_access_key AWS_SECRET_ACCESS_KEY` (`str`) -- The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job\\n \\n+## accelerate tpu-config\\n+\\n+`accelerate tpu-config`\\n+\\n+**Usage**:\\n+\\n+```bash\\n+accelerate tpu-config [arguments]\\n+```\\n+\\n+**Optional Arguments**:\\n+* `-h`, `--help` (`bool`) -- Show a help message and exit\\n+\\n+**Config Arguments**:\\n+\\n+Arguments that can be configured through `accelerate config`.\\n+\\n+* `--config_file` (`str`) -- Path to the config file to use for accelerate.\\n+* `--tpu_name` (`str`) -- The name of the TPU to use. If not specified, will use the TPU specified in the config file.\\n+* `--tpu_zone` (`str`) -- The zone of the TPU to use. If not specified, will use the zone specified in the config file.\\n+\\n+**TPU Arguments**:\\n+\\n+Arguments for options ran inside the TPU.\\n+\\n+* `--command_file` (`str`) -- The path to the file containing the commands to run on the pod on startup.\\n+* `--command` (`str`) -- A command to run on the pod. Can be passed multiple times.\\n+* `--install_accelerate` (`bool`) -- Whether to install accelerate on the pod. Defaults to False.\\n+* `--accelerate_version` (`str`) -- The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \\'dev\\' to install from GitHub.\\n+* `--debug` (`bool`) -- If set, will print the command that would be run instead of running it.\\n+\\n ## accelerate test\\n \\n `accelerate test` or `accelerate-test`\\ndiff --git a/src/accelerate/commands/accelerate_cli.py b/src/accelerate/commands/accelerate_cli.py\\nindex 515a66d1a..f0e76fd2c 100644\\n--- a/src/accelerate/commands/accelerate_cli.py\\n+++ b/src/accelerate/commands/accelerate_cli.py\\n@@ -19,8 +19,8 @@\\n from accelerate.commands.config import config_command_parser\\n from accelerate.commands.env import env_command_parser\\n from accelerate.commands.launch import launch_command_parser\\n-from accelerate.commands.pod import pod_command_parser\\n from accelerate.commands.test import test_command_parser\\n+from accelerate.commands.tpu import tpu_command_parser\\n \\n \\n def main():\\n@@ -31,7 +31,7 @@ def main():\\n config_command_parser(subparsers=subparsers)\\n env_command_parser(subparsers=subparsers)\\n launch_command_parser(subparsers=subparsers)\\n- pod_command_parser(subparsers=subparsers)\\n+ tpu_command_parser(subparsers=subparsers)\\n test_command_parser(subparsers=subparsers)\\n \\n # Let\\'s go\\ndiff --git a/src/accelerate/commands/pod.py b/src/accelerate/commands/tpu.py\\nsimilarity index 81%\\nrename from src/accelerate/commands/pod.py\\nrename to src/accelerate/commands/tpu.py\\nindex 87cd4f904..59bbb08e9 100644\\n--- a/src/accelerate/commands/pod.py\\n+++ b/src/accelerate/commands/tpu.py\\n@@ -22,70 +22,66 @@\\n from packaging.version import Version, parse\\n \\n \\n-_description = \"Run commands across a pod of TPU VMs for initial setup before running `accelerate launch`. 
Will also install Accelerate on the pod.\"\\n+_description = \"Run commands across TPU VMs for initial setup before running `accelerate launch`.\"\\n \\n \\n-def pod_command_parser(subparsers=None):\\n+def tpu_command_parser(subparsers=None):\\n if subparsers is not None:\\n- parser = subparsers.add_parser(\"pod-config\", description=_description)\\n+ parser = subparsers.add_parser(\"tpu-config\", description=_description)\\n else:\\n- parser = argparse.ArgumentParser(\"Accelerate pod-config command\", description=_description)\\n-\\n- parser.add_argument(\\n+ parser = argparse.ArgumentParser(\"Accelerate tpu-config command\", description=_description)\\n+ # Core arguments\\n+ config_args = parser.add_argument_group(\\n+ \"Config Arguments\", \"Arguments that can be configured through `accelerate config`.\"\\n+ )\\n+ config_args.add_argument(\\n \"--config_file\",\\n type=str,\\n default=None,\\n help=\"Path to the config file to use for accelerate.\",\\n )\\n-\\n- parser.add_argument(\\n- \"--pod_config_file\",\\n- type=str,\\n+ config_args.add_argument(\\n+ \"--tpu_name\",\\n default=None,\\n- help=\"Path to the config file to use for the pod.\",\\n+ help=\"The name of the TPU to use. If not specified, will use the TPU specified in the config file.\",\\n )\\n-\\n- parser.add_argument(\\n+ config_args.add_argument(\\n+ \"--tpu_zone\",\\n+ default=None,\\n+ help=\"The zone of the TPU to use. If not specified, will use the zone specified in the config file.\",\\n+ )\\n+ pod_args = parser.add_argument_group(\"TPU Arguments\", \"Arguments for options ran inside the TPU.\")\\n+ pod_args.add_argument(\\n \"--command_file\",\\n default=None,\\n help=\"The path to the file containing the commands to run on the pod on startup.\",\\n )\\n- parser.add_argument(\\n+ pod_args.add_argument(\\n \"--command\",\\n action=\"append\",\\n nargs=\"+\",\\n- help=\"A command to run on the pod. If not specified, will use the command specified in the command file.\",\\n- )\\n- parser.add_argument(\\n- \"--tpu_name\",\\n- default=None,\\n- help=\"The name of the TPU to use. If not specified, will use the TPU specified in the config file.\",\\n- )\\n- parser.add_argument(\\n- \"--tpu_zone\",\\n- default=None,\\n- help=\"The zone of the TPU to use. If not specified, will use the zone specified in the config file.\",\\n+ help=\"A command to run on the pod. Can be passed multiple times.\",\\n )\\n- parser.add_argument(\\n+ pod_args.add_argument(\\n \"--install_accelerate\",\\n action=\"store_true\",\\n help=\"Whether to install accelerate on the pod. Defaults to False.\",\\n )\\n- parser.add_argument(\\n+ pod_args.add_argument(\\n \"--accelerate_version\",\\n default=\"latest\",\\n help=\"The version of accelerate to install on the pod. If not specified, will use the latest pypi version. 
Specify \\'dev\\' to install from GitHub.\",\\n )\\n- parser.add_argument(\\n+ pod_args.add_argument(\\n \"--debug\", action=\"store_true\", help=\"If set, will print the command that would be run instead of running it.\"\\n )\\n \\n if subparsers is not None:\\n- parser.set_defaults(func=pod_launcher)\\n+ parser.set_defaults(func=tpu_command_launcher)\\n return parser\\n \\n \\n-def pod_launcher(args):\\n+def tpu_command_launcher(args):\\n defaults = None\\n \\n # Get the default from the config file if it exists.\\n@@ -146,7 +142,7 @@ def pod_launcher(args):\\n \\n \\n def main():\\n- parser = pod_command_parser()\\n+ parser = tpu_command_parser()\\n args = parser.parse_args()\\n \\n- pod_launcher(args)\\n+ tpu_command_launcher(args)\\ndiff --git a/tests/test_cli.py b/tests/test_cli.py\\nindex 2479f736d..50ed87d9f 100644\\n--- a/tests/test_cli.py\\n+++ b/tests/test_cli.py\\n@@ -66,15 +66,15 @@ def test_config_compatibility(self):\\n )\\n \\n \\n-class PodConfigTester(unittest.TestCase):\\n+class TpuConfigTester(unittest.TestCase):\\n \"\"\"\\n- Test case for verifying the `accelerate pod-config` CLI passes the right `gcloud` command.\\n+ Test case for verifying the `accelerate tpu-config` CLI passes the right `gcloud` command.\\n \"\"\"\\n \\n tpu_name = \"test-tpu\"\\n tpu_zone = \"us-central1-a\"\\n command = \"ls\"\\n- cmd = [\"accelerate\", \"pod-config\"]\\n+ cmd = [\"accelerate\", \"tpu-config\"]\\n base_output = \"cd /usr/share\"\\n command_file = \"tests/test_samples/test_command_file.sh\"\\n gcloud = \"Running gcloud compute tpus tpu-vm ssh\"\\n',\n", - " 'code_comments': [{'body': 'This arg was never used in the end, but not removed. ',\n", - " 'diff_hunk': '@@ -22,70 +22,66 @@\\n from packaging.version import Version, parse\\n \\n \\n-_description = \"Run commands across a pod of TPU VMs for initial setup before running `accelerate launch`. 
Will also install Accelerate on the pod.\"\\n+_description = \"Run commands across TPU VMs for initial setup before running `accelerate launch`.\"\\n \\n \\n-def pod_command_parser(subparsers=None):\\n+def tpu_command_parser(subparsers=None):\\n if subparsers is not None:\\n- parser = subparsers.add_parser(\"pod-config\", description=_description)\\n+ parser = subparsers.add_parser(\"tpu-config\", description=_description)\\n else:\\n- parser = argparse.ArgumentParser(\"Accelerate pod-config command\", description=_description)\\n-\\n- parser.add_argument(\\n+ parser = argparse.ArgumentParser(\"Accelerate tpu-config command\", description=_description)\\n+ # Core arguments\\n+ config_args = parser.add_argument_group(\\n+ \"Config Arguments\", \"Arguments that can be configured through `accelerate config`.\"\\n+ )\\n+ config_args.add_argument(\\n \"--config_file\",\\n type=str,\\n default=None,\\n help=\"Path to the config file to use for accelerate.\",\\n )\\n-\\n- parser.add_argument(\\n- \"--pod_config_file\",\\n- type=str,',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/818'},\n", - " 1108444656: {'diff': 'diff --git a/docs/source/package_reference/cli.mdx b/docs/source/package_reference/cli.mdx\\nindex 52e8ecfb6..f7424d656 100644\\n--- a/docs/source/package_reference/cli.mdx\\n+++ b/docs/source/package_reference/cli.mdx\\n@@ -78,63 +78,102 @@ accelerate launch [arguments] {training_script} --{training_script-argument-1} -\\n \\n * `-h`, `--help` (`bool`) -- Show a help message and exit\\n * `--config_file CONFIG_FILE` (`str`)-- The config file to use for the default values in the launching script.\\n-* `--cpu` (`bool`) -- Whether or not to force the training on the CPU.\\n-* `--mixed_precision {no,fp16,bf16}` (`str`) -- Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. BF16 training is only supported on\\n- Nvidia Ampere GPUs and PyTorch 1.10 or later.\\n-* `--multi_gpu` (`bool`, defaults to `False`) -- Whether or not this should launch a distributed GPU training.\\n * `-m`, `--module` (`bool`) -- Change each process to interpret the launch script as a Python module, executing with the same behavior as \\'python -m\\'.\\n * `--no_python` (`bool`) -- Skip prepending the training script with \\'python\\' - just execute it directly. Useful when the script is not a Python script.\\n+* `--debug` (`bool`) -- Whether to print out the torch.distributed stack trace when something fails.\\n \\n The rest of these arguments are configured through `accelerate config` and are read in from the specified `--config_file` (or default configuration) for their \\n values. 
They can also be passed in manually.\\n \\n-**Machine Configuration Arguments**:\\n+**Hardware Selection Arguments**:\\n \\n-The following arguments are useful for customization of worker machines\\n-* `--machine_rank MACHINE_RANK` (`int`) -- The rank of the machine on which this script is launched.\\n-* `--num_machines NUM_MACHINES` (`int`) -- The total number of machines used in this training.\\n+* `--cpu` (`bool`) -- Whether or not to force the training on the CPU.\\n+* `--multi_gpu` (`bool`) -- Whether or not this should launch a distributed GPU training.\\n+* `--mps` (`bool`) -- Whether or not this should use MPS-enabled GPU device on MacOS machines.\\n+* `--tpu` (`bool`) -- Whether or not this should launch a TPU training.\\n+\\n+**Resource Selection Arguments**:\\n+\\n+The following arguments are useful for fine-tuning how available hardware should be used\\n+\\n+* `--mixed_precision {no,fp16,bf16}` (`str`) -- Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\\n * `--num_processes NUM_PROCESSES` (`int`) -- The total number of processes to be launched in parallel.\\n+* `--num_machines NUM_MACHINES` (`int`) -- The total number of machines used in this training.\\n+* `--num_cpu_threads_per_process NUM_CPU_THREADS_PER_PROCESS` (`int`) -- The number of CPU threads per process. Can be tuned for optimal performance.\\n+\\n+**Training Paradigm Arguments**:\\n+\\n+The following arguments are useful for selecting which training paradigm to use.\\n+\\n+* `--use_deepspeed` (`bool`) -- Whether or not to use DeepSpeed for training.\\n+* `--use_fsdp` (`bool`) -- Whether or not to use FullyShardedDataParallel for training.\\n+* `--use_megatron_lm` (`bool`) -- Whether or not to use Megatron-LM for training.\\n+\\n+**Distributed GPU Arguments**:\\n+\\n+The following arguments are only useful when `multi_gpu` is passed or multi-gpu training is configured through `accelerate config`: \\n+\\n * `--gpu_ids` (`str`) -- What GPUs (by id) should be used for training on this machine as a comma-seperated list\\n * `--same_network` (`bool`) -- Whether all machines used for multinode training exist on the same local network.\\n+* `--machine_rank MACHINE_RANK` (`int`) -- The rank of the machine on which this script is launched.\\n * `--main_process_ip MAIN_PROCESS_IP` (`str`) -- The IP address of the machine of rank 0.\\n * `--main_process_port MAIN_PROCESS_PORT` (`int`) -- The port to use to communicate with the machine of rank 0.\\n * `--rdzv_conf` (`str`) -- Additional rendezvous configuration (=,=,...).\\n-* `--num_cpu_threads_per_process NUM_CPU_THREADS_PER_PROCESS` (`int`) -- The number of CPU threads per process. 
Can be tuned for optimal performance.\\n * `--max_restarts` (`int`) -- Maximum number of worker group restarts before failing.\\n * `--monitor_interval` (`float`) -- Interval, in seconds, to monitor the state of workers.\\n \\n+**TPU Arguments**:\\n+\\n+The following arguments are only useful when `tpu` is passed or TPU training is configured through `accelerate config`: \\n+\\n+* `--main_training_function MAIN_TRAINING_FUNCTION` (`str`) -- The name of the main function to be executed in your script.\\n+* `--downcast_bf16` (`bool`) -- Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.\\n+\\n **DeepSpeed Arguments**:\\n \\n-The following arguments are only useful when `use_deepspeed` is passed: \\n-* `--use_deepspeed` (`bool`) -- Whether to use deepspeed.\\n-* `--deepspeed_config_file DEEPSPEED_CONFIG_FILE` (`str`) -- DeepSpeed config file.\\n-* `--zero_stage ZERO_STAGE` (`str`) -- DeepSpeed\\'s ZeRO optimization stage\\n-* `--offload_optimizer_device OFFLOAD_OPTIMIZER_DEVICE` (`str`) -- Decides where (none|cpu|nvme) to offload optimizer states\\n-* `--offload_param_device OFFLOAD_PARAM_DEVICE` (`str`) -- Decides where (none|cpu|nvme) to offload parameters\\n-* `--gradient_accumulation_steps GRADIENT_ACCUMULATION_STEPS` (`int`) -- Number of gradient_accumulation_steps used in your training script\\n-* `--gradient_clipping GRADIENT_CLIPPING` (`float`) -- gradient clipping value used in your training script\\n-The following arguments are related to using ZeRO Stage-3\\n-* `--zero3_init_flag ZERO3_INIT_FLAG` (`bool`) -- Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models\\n-* `--zero3_save_16bit_model ZERO3_SAVE_16BIT_MODEL` (`bool`) -- Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3\\n+The following arguments are only useful when `use_deepspeed` is passed or `deepspeed` is configured through `accelerate config`: \\n+\\n+* `--deepspeed_config_file` (`str`) -- DeepSpeed config file.\\n+* `--zero_stage` (`int`) -- DeepSpeed\\'s ZeRO optimization stage.\\n+* `--offload_optimizer_device` (`str`) -- Decides where (none|cpu|nvme) to offload optimizer states.\\n+* `--offload_param_device` (`str`) -- Decides where (none|cpu|nvme) to offload parameters.\\n+* `--gradient_accumulation_steps` (`int`) -- No of gradient_accumulation_steps used in your training script.\\n+* `--gradient_clipping` (`float`) -- Gradient clipping value used in your training script.\\n+* `--zero3_init_flag` (`str`) -- Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with DeepSpeed ZeRO Stage-3.\\n+* `--zero3_save_16bit_model` (`str`) -- Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. 
Only applicable with DeepSpeed ZeRO Stage-3.\\n+* `--deepspeed_hostfile` (`str`) -- DeepSpeed hostfile for configuring multi-node compute resources.\\n+* `--deepspeed_exclusion_filter` (`str`) -- DeepSpeed exclusion filter string when using mutli-node setup.\\n+* `--deepspeed_inclusion_filter` (`str`) -- DeepSpeed inclusion filter string when using mutli-node setup.\\n+* `--deepspeed_multinode_launcher` (`str`) -- DeepSpeed multi-node launcher to use.\\n \\n **Fully Sharded Data Parallelism Arguments**:\\n \\n-The following arguments are only useful when `use_fdsp` is passed:\\n-* `--use_fsdp` (`bool`) -- Whether to use fsdp.\\n-* `--offload_params OFFLOAD_PARAMS` (`bool`) -- Decides Whether (true|false) to offload parameters and gradients to CPU.\\n-* `--min_num_params MIN_NUM_PARAMS` (`int`) -- FSDP\\'s minimum number of parameters for Default Auto Wrapping.\\n-* `--sharding_strategy SHARDING_STRATEGY` (`str`) -- FSDP\\'s Sharding Strategy.\\n+The following arguments are only useful when `use_fdsp` is passed or Fully Sharded Data Parallelism is configured through `accelerate config`:\\n \\n-**TPU Arguments**:\\n+* `--fsdp_offload_params` (`str`) -- Decides Whether (true|false) to offload parameters and gradients to CPU.\\n+* `--fsdp_min_num_params` (`int`) -- FSDP\\'s minimum number of parameters for Default Auto Wrapping.\\n+* `--fsdp_sharding_strategy` (`int`) -- FSDP\\'s Sharding Strategy.\\n+* `--fsdp_auto_wrap_policy` (`str`) -- FSDP\\'s auto wrap policy.\\n+* `--fsdp_transformer_layer_cls_to_wrap` (`str`) -- Transformer layer class name (case-sensitive) to wrap, e.g, `BertLayer`, `GPTJBlock`, `T5Block` ...\\n+* `--fsdp_backward_prefetch_policy` (`str`) -- FSDP\\'s backward prefetch policy.\\n+* `--fsdp_state_dict_type` (`str`) -- FSDP\\'s state dict type.\\n \\n-The following arguments are only useful when `tpu` is passed:\\n-* `--tpu` (`bool`) -- Whether or not this should launch a TPU training.\\n-* `--main_training_function MAIN_TRAINING_FUNCTION` (`str`) -- The name of the main function to be executed in your script.\\n-* `--downcast_bf16` (`bool`) -- Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.\\n+**Megatron-LM Arguments**:\\n+\\n+The following arguments are only useful when `use_megatron_lm` is passed or Megatron-LM is configured through `accelerate config`:\\n+\\n+* `--megatron_lm_tp_degree` (``) -- Megatron-LM\\'s Tensor Parallelism (TP) degree.\\n+* `--megatron_lm_pp_degree` (``) -- Megatron-LM\\'s Pipeline Parallelism (PP) degree.\\n+* `--megatron_lm_num_micro_batches` (``) -- Megatron-LM\\'s number of micro batches when PP degree > 1.\\n+* `--megatron_lm_sequence_parallelism` (``) -- Decides Whether (true|false) to enable Sequence Parallelism when TP degree > 1.\\n+* `--megatron_lm_recompute_activations` (``) -- Decides Whether (true|false) to enable Selective Activation Recomputation.\\n+* `--megatron_lm_use_distributed_optimizer` (``) -- Decides Whether (true|false) to use distributed optimizer which shards optimizer state and gradients across Data Pralellel (DP) ranks.\\n+* `--megatron_lm_gradient_clipping` (``) -- Megatron-LM\\'s gradient clipping value based on global L2 Norm (0 to disable).\\n \\n **AWS SageMaker Arguments**:\\n \\n The following arguments are only useful when training in SageMaker\\n+\\n * `--aws_access_key_id AWS_ACCESS_KEY_ID` (`str`) -- The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job\\n * `--aws_secret_access_key 
AWS_SECRET_ACCESS_KEY` (`str`) -- The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job\\n \\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex 415b06906..46d95c713 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -64,6 +64,7 @@\\n options_to_group = {\\n \"--multi-gpu\": \"Distributed GPUs\",\\n \"--tpu\": \"TPU\",\\n+ \"--mps\": \"MPS\",\\n \"--use_mps_device\": \"MPS\",\\n \"--use_deepspeed\": \"DeepSpeed Arguments\",\\n \"--use_fsdp\": \"FSDP Arguments\",\\n@@ -144,6 +145,12 @@ def launch_command_parser(subparsers=None):\\n hardware_args.add_argument(\\n \"--cpu\", default=False, action=\"store_true\", help=\"Whether or not to force the training on the CPU.\"\\n )\\n+ hardware_args.add_argument(\\n+ \"--mps\",\\n+ default=False,\\n+ action=\"store_true\",\\n+ help=\"Whether or not this should use MPS-enabled GPU device on MacOS machines.\",\\n+ )\\n hardware_args.add_argument(\\n \"--multi_gpu\",\\n default=False,\\n@@ -157,7 +164,7 @@ def launch_command_parser(subparsers=None):\\n \"--use_mps_device\",\\n default=False,\\n action=\"store_true\",\\n- help=\"Whether or not this should use MPS-enabled GPU device on MacOS machines.\",\\n+ help=\"This argument is deprecated, use `--mps` instead.\",\\n )\\n \\n # Resource selection arguments\\n@@ -506,8 +513,14 @@ def simple_launcher(args):\\n \\n current_env = os.environ.copy()\\n current_env[\"USE_CPU\"] = str(args.cpu or args.use_cpu)\\n- current_env[\"USE_MPS_DEVICE\"] = str(args.use_mps_device)\\n if args.use_mps_device:\\n+ warnings.warn(\\n+ \\'`use_mps_device` flag is deprecated and will be removed in version 0.15.0 of 🤗 Accelerate. Use \"--mps\" instead.\\',\\n+ FutureWarning,\\n+ )\\n+ args.mps = True\\n+ current_env[\"USE_MPS_DEVICE\"] = str(args.mps)\\n+ if args.mps:\\n current_env[\"PYTORCH_ENABLE_MPS_FALLBACK\"] = \"1\"\\n elif args.gpu_ids != \"all\" and args.gpu_ids is not None:\\n current_env[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_ids\\n@@ -704,7 +717,10 @@ def deepspeed_launcher(args):\\n )\\n \\n if args.fp16:\\n- warnings.warn(\\'--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.\\', FutureWarning)\\n+ warnings.warn(\\n+ \\'--fp16 flag is deprecated and will be removed in version 0.15.0 of 🤗 Accelerate. 
Use \"--mixed_precision fp16\" instead.\\',\\n+ FutureWarning,\\n+ )\\n mixed_precision = \"fp16\"\\n \\n current_env[\"PYTHONPATH\"] = env_var_path_add(\"PYTHONPATH\", os.path.abspath(\".\"))\\n@@ -955,18 +971,18 @@ def launch_command(args):\\n if (\\n not args.multi_gpu\\n and not args.tpu\\n+ and not args.mps\\n and not args.use_deepspeed\\n and not args.use_fsdp\\n- and not args.use_mps_device\\n and not args.use_megatron_lm\\n ):\\n args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED\\n args.multi_gpu = defaults.distributed_type == DistributedType.MULTI_GPU\\n args.tpu = defaults.distributed_type == DistributedType.TPU\\n args.use_fsdp = defaults.distributed_type == DistributedType.FSDP\\n- args.use_mps_device = defaults.distributed_type == DistributedType.MPS\\n+ args.mps = defaults.distributed_type == DistributedType.MPS\\n args.use_megatron_lm = defaults.distributed_type == DistributedType.MEGATRON_LM\\n- if not args.use_mps_device:\\n+ if not args.mps:\\n if args.gpu_ids is None:\\n if defaults.gpu_ids is not None:\\n args.gpu_ids = defaults.gpu_ids\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/814'},\n", - " 1108328128: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex 287e6899a..63ea4a71b 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -213,7 +213,10 @@ def __init__(\\n )\\n \\n if fp16:\\n- warnings.warn(\\'fp16=True is deprecated. Use mixed_precision=\"fp16\" instead.\\', DeprecationWarning)\\n+ warnings.warn(\\n+ \"`fp16=True` is deprecated and will be removed in version 0.15.0 of 🤗 Accelerate. Use `mixed_precision=\\'fp16\\'` instead.\",\\n+ FutureWarning,\\n+ )\\n mixed_precision = \"fp16\"\\n \\n if deepspeed_plugin is None: # init from env variables\\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex df4f703c3..415b06906 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -173,7 +173,10 @@ def launch_command_parser(subparsers=None):\\n \"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\",\\n )\\n resource_args.add_argument(\\n- \"--fp16\", default=False, action=\"store_true\", help=\"Whether or not to use mixed precision training.\"\\n+ \"--fp16\",\\n+ default=False,\\n+ action=\"store_true\",\\n+ help=\"This argument is deprecated, use `--mixed_precision fp16` instead.\",\\n )\\n resource_args.add_argument(\\n \"--num_processes\", type=int, default=None, help=\"The total number of processes to be launched in parallel.\"\\n@@ -403,30 +406,6 @@ def launch_command_parser(subparsers=None):\\n type=str,\\n help=\"FSDP\\'s state dict type. (useful only when `use_fsdp` flag is passed).\",\\n )\\n- fsdp_args.add_argument(\\n- \"--offload_params\",\\n- default=None,\\n- type=str,\\n- help=\"This argument is deprecated. Use `fsdp_offload_params` instead.\",\\n- )\\n- fsdp_args.add_argument(\\n- \"--min_num_params\",\\n- type=int,\\n- default=None,\\n- help=\"This argument is deprecated. Use `fsdp_min_num_params` instead.\",\\n- )\\n- fsdp_args.add_argument(\\n- \"--sharding_strategy\",\\n- type=int,\\n- default=None,\\n- help=\"This argument is deprecated. 
Use `fsdp_sharding_strategy` instead.\",\\n- )\\n- fsdp_args.add_argument(\\n- \"--transformer_layer_cls_to_wrap\",\\n- default=None,\\n- type=str,\\n- help=\"This argument is deprecated. Use `fsdp_transformer_layer_cls_to_wrap` instead.\",\\n- )\\n \\n # megatron_lm args\\n megatron_lm_args = parser.add_argument_group(\"Megatron-LM Arguments\", \"Arguments related to Megatron-LM.\")\\n@@ -547,7 +526,10 @@ def simple_launcher(args):\\n )\\n \\n if args.fp16:\\n- warnings.warn(\\'--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.\\', DeprecationWarning)\\n+ warnings.warn(\\n+ \"`fp16` is deprecated and will be removed in version 0.15.0 of 🤗 Accelerate. Use `mixed_precision fp16` instead.\",\\n+ FutureWarning,\\n+ )\\n mixed_precision = \"fp16\"\\n \\n current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\\n@@ -596,43 +578,14 @@ def multi_gpu_launcher(args):\\n raise ValueError(f\"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.\")\\n \\n if args.fp16:\\n- warnings.warn(\\'--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.\\', DeprecationWarning)\\n+ warnings.warn(\\n+ \"`fp16` is deprecated and will be removed in version 0.15.0 of 🤗 Accelerate. Use `mixed_precision fp16` instead.\",\\n+ FutureWarning,\\n+ )\\n mixed_precision = \"fp16\"\\n \\n current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\\n if args.use_fsdp:\\n- if args.sharding_strategy is not None:\\n- warnings.warn(\\n- \"`sharding_strategy` is deprecated and will be removed in version 0.13.0 of 🤗 Accelerate. Use\"\\n- \" `fsdp_sharding_strategy` instead\",\\n- FutureWarning,\\n- )\\n- args.fsdp_sharding_strategy = args.sharding_strategy\\n-\\n- if args.offload_params is not None:\\n- warnings.warn(\\n- \"`offload_params` is deprecated and will be removed in version 0.13.0 of 🤗 Accelerate. Use\"\\n- \" `fsdp_offload_params` instead\",\\n- FutureWarning,\\n- )\\n- args.fsdp_offload_params = args.offload_params\\n-\\n- if args.min_num_params is not None:\\n- warnings.warn(\\n- \"`min_num_params` is deprecated and will be removed in version 0.13.0 of 🤗 Accelerate. Use\"\\n- \" `fsdp_min_num_params` instead\",\\n- FutureWarning,\\n- )\\n- args.fsdp_min_num_params = args.min_num_params\\n-\\n- if args.transformer_layer_cls_to_wrap is not None:\\n- warnings.warn(\\n- \"`transformer_layer_cls_to_wrap` is deprecated and will be removed in version 0.13.0 of 🤗 Accelerate. Use\"\\n- \" `fsdp_transformer_layer_cls_to_wrap` instead\",\\n- FutureWarning,\\n- )\\n- args.fsdp_transformer_layer_cls_to_wrap = args.transformer_layer_cls_to_wrap\\n-\\n current_env[\"USE_FSDP\"] = \"true\"\\n current_env[\"FSDP_SHARDING_STRATEGY\"] = str(args.fsdp_sharding_strategy)\\n current_env[\"FSDP_OFFLOAD_PARAMS\"] = str(args.fsdp_offload_params).lower()\\n@@ -751,7 +704,7 @@ def deepspeed_launcher(args):\\n )\\n \\n if args.fp16:\\n- warnings.warn(\\'--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.\\', DeprecationWarning)\\n+ warnings.warn(\\'--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.\\', FutureWarning)\\n mixed_precision = \"fp16\"\\n \\n current_env[\"PYTHONPATH\"] = env_var_path_add(\"PYTHONPATH\", os.path.abspath(\".\"))\\n@@ -921,7 +874,7 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\\n )\\n \\n if args.fp16:\\n- warnings.warn(\\'--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.\\', DeprecationWarning)\\n+ warnings.warn(\\'--fp16 flag is deprecated. 
Use \"--mixed_precision fp16\" instead.\\', FutureWarning)\\n mixed_precision = \"fp16\"\\n \\n # Environment variables to be set for use during training job\\ndiff --git a/src/accelerate/launchers.py b/src/accelerate/launchers.py\\nindex ea8fffa51..4de8b5858 100644\\n--- a/src/accelerate/launchers.py\\n+++ b/src/accelerate/launchers.py\\n@@ -105,7 +105,10 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mix\\n )\\n \\n if use_fp16:\\n- warnings.warn(\\'use_fp16=True is deprecated. Use mixed_precision=\"fp16\" instead.\\', DeprecationWarning)\\n+ warnings.warn(\\n+ \"`fp16=True` is deprecated and will be removed in version 0.15.0 of 🤗 Accelerate. Use `mixed_precision=\\'fp16\\'` instead.\",\\n+ FutureWarning,\\n+ )\\n mixed_precision = \"fp16\"\\n \\n # torch.distributed will expect a few environment variable to be here. We set the ones common to each\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/813'},\n", - " 1108296843: {'diff': 'diff --git a/src/accelerate/hooks.py b/src/accelerate/hooks.py\\nindex 120bb7f42..ef429a369 100644\\n--- a/src/accelerate/hooks.py\\n+++ b/src/accelerate/hooks.py\\n@@ -160,17 +160,19 @@ def new_forward(*args, **kwargs):\\n return module\\n \\n \\n-def remove_hook_from_module(module: nn.Module):\\n+def remove_hook_from_module(module: nn.Module, recurse=False):\\n \"\"\"\\n Removes any hook attached to a module via `add_hook_to_module`.\\n \\n Args:\\n module (`torch.nn.Module`): The module to attach a hook to.\\n+ recurse (`bool`, **optional**): Whether to remove the hooks recursively\\n \\n Returns:\\n `torch.nn.Module`: The same module, with the hook detached (the module is modified in place, so the result can\\n be discarded).\\n \"\"\"\\n+\\n if hasattr(module, \"_hf_hook\"):\\n module._hf_hook.detach_hook(module)\\n delattr(module, \"_hf_hook\")\\n@@ -179,6 +181,10 @@ def remove_hook_from_module(module: nn.Module):\\n module.forward = module._old_forward\\n delattr(module, \"_old_forward\")\\n \\n+ if recurse:\\n+ for child in module.children():\\n+ remove_hook_from_module(child, recurse)\\n+\\n return module\\n \\n \\n',\n", - " 'code_comments': [{'body': 'Not sure the check is useful (should be >0 but the loop will just be empty)\\r\\n```suggestion\\r\\n if recurse:\\r\\n```',\n", - " 'diff_hunk': '@@ -179,6 +181,10 @@ def remove_hook_from_module(module: nn.Module):\\n module.forward = module._old_forward\\n delattr(module, \"_old_forward\")\\n \\n+ if recurse and len(list(module.children())) > 1:',\n", - " 'from_author': False},\n", - " {'body': \"```suggestion\\r\\n for child in module.children():\\r\\n```\\r\\nNo need to ask for the names if you don't want them ;-)\",\n", - " 'diff_hunk': '@@ -179,6 +181,10 @@ def remove_hook_from_module(module: nn.Module):\\n module.forward = module._old_forward\\n delattr(module, \"_old_forward\")\\n \\n+ if recurse and len(list(module.children())) > 1:\\n+ for _, child in module.named_children():',\n", - " 'from_author': False},\n", - " {'body': 'Nice catch! 
Yes indeed ;) ',\n", - " 'diff_hunk': '@@ -179,6 +181,10 @@ def remove_hook_from_module(module: nn.Module):\\n module.forward = module._old_forward\\n delattr(module, \"_old_forward\")\\n \\n+ if recurse and len(list(module.children())) > 1:',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'Thanks for the suggestion! Can confirm the `BetterTransformers` tests pass on my side ;) ',\n", - " 'from_author': True}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/812'},\n", - " 1108011205: {'diff': 'diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\\nindex 4fad12c72..d5eeef99a 100644\\n--- a/src/accelerate/optimizer.py\\n+++ b/src/accelerate/optimizer.py\\n@@ -157,3 +157,9 @@ def is_overflow(self):\\n def step_was_skipped(self):\\n \"\"\"Whether or not the optimizer step was skipped.\"\"\"\\n return self._is_overflow\\n+\\n+ def __getstate__(self):\\n+ return self.__dict__.copy()\\n+\\n+ def __setstate__(self, state):\\n+ self.__dict__.update(state)\\ndiff --git a/tests/test_optimizer.py b/tests/test_optimizer.py\\nnew file mode 100644\\nindex 000000000..15a095bf7\\n--- /dev/null\\n+++ b/tests/test_optimizer.py\\n@@ -0,0 +1,36 @@\\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+import pickle\\n+import unittest\\n+\\n+import torch\\n+\\n+from accelerate import Accelerator\\n+from accelerate.state import AcceleratorState\\n+from accelerate.test_utils import require_cpu\\n+\\n+\\n+@require_cpu\\n+class OptimizerTester(unittest.TestCase):\\n+ def test_accelerated_optimizer_pickling(self):\\n+ model = torch.nn.Linear(10, 10)\\n+ optimizer = torch.optim.SGD(model.parameters(), 0.1)\\n+ accelerator = Accelerator()\\n+ optimizer = accelerator.prepare(optimizer)\\n+ try:\\n+ pickle.loads(pickle.dumps(optimizer))\\n+ except Exception as e:\\n+ self.fail(f\"Accelerated optimizer pickling failed with {e}\")\\n+ AcceleratorState._reset_state()\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'Perfect, thanks!', 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/811'},\n", - " 1107487241: {'diff': 'diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex 7c8534e93..df4f703c3 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -61,180 +61,367 @@\\n \\n logger = logging.getLogger(__name__)\\n \\n+options_to_group = {\\n+ \"--multi-gpu\": \"Distributed GPUs\",\\n+ \"--tpu\": \"TPU\",\\n+ \"--use_mps_device\": \"MPS\",\\n+ \"--use_deepspeed\": \"DeepSpeed Arguments\",\\n+ \"--use_fsdp\": \"FSDP Arguments\",\\n+ \"--use_megatron_lm\": \"Megatron-LM 
Arguments\",\\n+}\\n+\\n+\\n+def clean_option(option):\\n+ \"Finds all cases of - after the first two characters and changes them to _\"\\n+ if option.startswith(\"--\"):\\n+ return option[:3] + option[3:].replace(\"-\", \"_\")\\n+\\n+\\n+class _CustomHelpAction(argparse._HelpAction):\\n+ \"\"\"\\n+ This is a custom help action that will hide all arguments that are not used in the command line when the help is\\n+ called. This is useful for the case where the user is using a specific platform and only wants to see the arguments\\n+ for that platform.\\n+ \"\"\"\\n+\\n+ def __call__(self, parser, namespace, values, option_string=None):\\n+ if \"accelerate\" in sys.argv[0] and \"launch\" in sys.argv[1:]:\\n+ args = sys.argv[2:]\\n+ else:\\n+ args = sys.argv[1:]\\n+ opts = parser._actions\\n+ titles = [\\n+ \"Hardware Selection Arguments\",\\n+ \"Resource Selection Arguments\",\\n+ \"Training Paradigm Arguments\",\\n+ \"positional arguments\",\\n+ \"optional arguments\",\\n+ ]\\n+ if len(args) > 1:\\n+ used_platforms = [arg for arg in args if arg in options_to_group.keys()]\\n+ args = list(map(clean_option, args))\\n+ used_titles = [options_to_group[o] for o in used_platforms]\\n+ for i, arg in enumerate(opts):\\n+ # If the argument\\'s container is outside of the used titles, hide it\\n+ if arg.container.title not in titles + used_titles:\\n+ setattr(opts[i], \"help\", argparse.SUPPRESS)\\n+ # If the argument is hardware selection, but not being passed, hide it\\n+ elif arg.container.title == \"Hardware Selection Arguments\":\\n+ if set(arg.option_strings).isdisjoint(set(args)):\\n+ setattr(opts[i], \"help\", argparse.SUPPRESS)\\n+ else:\\n+ setattr(opts[i], \"help\", arg.help + \" (currently selected)\")\\n+ # If the argument is a training paradigm, but not being passed, hide it\\n+ elif arg.container.title == \"Training Paradigm Arguments\":\\n+ if set(arg.option_strings).isdisjoint(set(used_platforms)):\\n+ setattr(opts[i], \"help\", argparse.SUPPRESS)\\n+ else:\\n+ setattr(opts[i], \"help\", arg.help + \" (currently selected)\")\\n+ for i, group in enumerate(list(parser._action_groups)):\\n+ # If all arguments in the group are hidden, hide the group\\n+ if all([arg.help == argparse.SUPPRESS for arg in group._group_actions]):\\n+ parser._action_groups.remove(group)\\n+\\n+ super().__call__(parser, namespace, values, option_string)\\n+\\n \\n def launch_command_parser(subparsers=None):\\n if subparsers is not None:\\n- parser = subparsers.add_parser(\"launch\")\\n+ parser = subparsers.add_parser(\"launch\", add_help=False)\\n else:\\n- parser = argparse.ArgumentParser(\"Accelerate launch command\")\\n+ parser = argparse.ArgumentParser(\"Accelerate launch command\", add_help=False)\\n+\\n+ parser.register(\"action\", \"help\", _CustomHelpAction)\\n+ parser.add_argument(\"-h\", \"--help\", action=\"help\", help=\"Show this help message and exit.\")\\n \\n parser.add_argument(\\n \"--config_file\", default=None, help=\"The config file to use for the default values in the launching script.\"\\n )\\n- parser.add_argument(\\n+ # Hardware selection arguments\\n+ hardware_args = parser.add_argument_group(\\n+ \"Hardware Selection Arguments\", \"Arguments for selecting the hardware to be used.\"\\n+ )\\n+ hardware_args.add_argument(\\n+ \"--cpu\", default=False, action=\"store_true\", help=\"Whether or not to force the training on the CPU.\"\\n+ )\\n+ hardware_args.add_argument(\\n \"--multi_gpu\",\\n default=False,\\n action=\"store_true\",\\n help=\"Whether or not this should launch a distributed 
GPU training.\",\\n )\\n- parser.add_argument(\\n+ hardware_args.add_argument(\\n+ \"--tpu\", default=False, action=\"store_true\", help=\"Whether or not this should launch a TPU training.\"\\n+ )\\n+ hardware_args.add_argument(\\n \"--use_mps_device\",\\n default=False,\\n action=\"store_true\",\\n help=\"Whether or not this should use MPS-enabled GPU device on MacOS machines.\",\\n )\\n \\n- # deepspeed args\\n- parser.add_argument(\\n+ # Resource selection arguments\\n+ resource_args = parser.add_argument_group(\\n+ \"Resource Selection Arguments\", \"Arguments for fine-tuning how available hardware should be used.\"\\n+ )\\n+ resource_args.add_argument(\\n+ \"--mixed_precision\",\\n+ type=str,\\n+ choices=[\"no\", \"fp16\", \"bf16\"],\\n+ help=\"Whether or not to use mixed precision training. \"\\n+ \"Choose between FP16 and BF16 (bfloat16) training. \"\\n+ \"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\",\\n+ )\\n+ resource_args.add_argument(\\n+ \"--fp16\", default=False, action=\"store_true\", help=\"Whether or not to use mixed precision training.\"\\n+ )\\n+ resource_args.add_argument(\\n+ \"--num_processes\", type=int, default=None, help=\"The total number of processes to be launched in parallel.\"\\n+ )\\n+ resource_args.add_argument(\\n+ \"--num_machines\", type=int, default=None, help=\"The total number of machines used in this training.\"\\n+ )\\n+ resource_args.add_argument(\\n+ \"--num_cpu_threads_per_process\",\\n+ type=int,\\n+ default=None,\\n+ help=\"The number of CPU threads per process. Can be tuned for optimal performance.\",\\n+ )\\n+\\n+ # Training Paradigm arguments\\n+ paradigm_args = parser.add_argument_group(\\n+ \"Training Paradigm Arguments\", \"Arguments for selecting which training paradigm to be used.\"\\n+ )\\n+ paradigm_args.add_argument(\\n \"--use_deepspeed\",\\n default=False,\\n action=\"store_true\",\\n help=\"Whether to use deepspeed.\",\\n )\\n+ paradigm_args.add_argument(\\n+ \"--use_fsdp\",\\n+ default=False,\\n+ action=\"store_true\",\\n+ help=\"Whether to use fsdp.\",\\n+ )\\n+ paradigm_args.add_argument(\\n+ \"--use_megatron_lm\",\\n+ default=False,\\n+ action=\"store_true\",\\n+ help=\"Whether to use Megatron-LM.\",\\n+ )\\n+\\n+ # distributed GPU training arguments\\n+ distributed_args = parser.add_argument_group(\"Distributed GPUs\", \"Arguments related to distributed GPU training.\")\\n+ distributed_args.add_argument(\\n+ \"--gpu_ids\",\\n+ default=None,\\n+ help=\"What GPUs (by id) should be used for training on this machine as a comma-seperated list\",\\n+ )\\n+ distributed_args.add_argument(\\n+ \"--same_network\",\\n+ default=False,\\n+ action=\"store_true\",\\n+ help=\"Whether all machines used for multinode training exist on the same local network.\",\\n+ )\\n+ distributed_args.add_argument(\\n+ \"--machine_rank\", type=int, default=None, help=\"The rank of the machine on which this script is launched.\"\\n+ )\\n+ distributed_args.add_argument(\\n+ \"--main_process_ip\", type=str, default=None, help=\"The IP address of the machine of rank 0.\"\\n+ )\\n+ distributed_args.add_argument(\\n+ \"--main_process_port\",\\n+ type=int,\\n+ default=None,\\n+ help=\"The port to use to communicate with the machine of rank 0.\",\\n+ )\\n+ # Rendezvous related arguments\\n+ distributed_args.add_argument(\\n+ \"--rdzv_conf\",\\n+ type=str,\\n+ default=\"\",\\n+ help=\"Additional rendezvous configuration (=,=,...).\",\\n+ )\\n+ distributed_args.add_argument(\\n+ \"--max_restarts\",\\n+ type=int,\\n+ default=0,\\n+ 
help=\"Maximum number of worker group restarts before failing.\",\\n+ )\\n+ distributed_args.add_argument(\\n+ \"--monitor_interval\",\\n+ type=float,\\n+ default=5,\\n+ help=\"Interval, in seconds, to monitor the state of workers.\",\\n+ )\\n+ parser.add_argument(\\n+ \"-m\",\\n+ \"--module\",\\n+ action=\"store_true\",\\n+ help=\"Change each process to interpret the launch script as a Python module, executing with the same behavior as \\'python -m\\'.\",\\n+ )\\n parser.add_argument(\\n+ \"--no_python\",\\n+ action=\"store_true\",\\n+ help=\"Skip prepending the training script with \\'python\\' - just execute it directly. Useful when the script is not a Python script.\",\\n+ )\\n+\\n+ # tpu arguments\\n+ tpu_args = parser.add_argument_group(\"TPU\", \"Arguments related to TPU.\")\\n+ tpu_args.add_argument(\\n+ \"--main_training_function\",\\n+ type=str,\\n+ default=None,\\n+ help=\"The name of the main function to be executed in your script (only for TPU training).\",\\n+ )\\n+ tpu_args.add_argument(\\n+ \"--downcast_bf16\",\\n+ action=\"store_true\",\\n+ help=\"Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.\",\\n+ )\\n+\\n+ # DeepSpeed arguments\\n+ deepspeed_args = parser.add_argument_group(\"DeepSpeed Arguments\", \"Arguments related to DeepSpeed.\")\\n+ deepspeed_args.add_argument(\\n \"--deepspeed_config_file\",\\n default=None,\\n type=str,\\n help=\"DeepSpeed config file.\",\\n )\\n- parser.add_argument(\\n+ deepspeed_args.add_argument(\\n \"--zero_stage\",\\n default=None,\\n type=int,\\n help=\"DeepSpeed\\'s ZeRO optimization stage (useful only when `use_deepspeed` flag is passed).\",\\n )\\n- parser.add_argument(\\n+ deepspeed_args.add_argument(\\n \"--offload_optimizer_device\",\\n default=None,\\n type=str,\\n help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed).\",\\n )\\n- parser.add_argument(\\n+ deepspeed_args.add_argument(\\n \"--offload_param_device\",\\n default=None,\\n type=str,\\n help=\"Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed).\",\\n )\\n- parser.add_argument(\\n+ deepspeed_args.add_argument(\\n \"--gradient_accumulation_steps\",\\n default=None,\\n type=int,\\n help=\"No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed).\",\\n )\\n- parser.add_argument(\\n+ deepspeed_args.add_argument(\\n \"--gradient_clipping\",\\n default=None,\\n type=float,\\n help=\"gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed).\",\\n )\\n- parser.add_argument(\\n+ deepspeed_args.add_argument(\\n \"--zero3_init_flag\",\\n default=None,\\n type=str,\\n help=\"Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. \"\\n \"Only applicable with DeepSpeed ZeRO Stage-3.\",\\n )\\n- parser.add_argument(\\n+ deepspeed_args.add_argument(\\n \"--zero3_save_16bit_model\",\\n default=None,\\n type=str,\\n help=\"Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. 
\"\\n \"Only applicable with DeepSpeed ZeRO Stage-3.\",\\n )\\n- parser.add_argument(\\n+ deepspeed_args.add_argument(\\n \"--deepspeed_hostfile\",\\n default=None,\\n type=str,\\n help=\"DeepSpeed hostfile for configuring multi-node compute resources.\",\\n )\\n- parser.add_argument(\\n+ deepspeed_args.add_argument(\\n \"--deepspeed_exclusion_filter\",\\n default=None,\\n type=str,\\n help=\"DeepSpeed exclusion filter string when using mutli-node setup.\",\\n )\\n- parser.add_argument(\\n+ deepspeed_args.add_argument(\\n \"--deepspeed_inclusion_filter\",\\n default=None,\\n type=str,\\n help=\"DeepSpeed inclusion filter string when using mutli-node setup.\",\\n )\\n- parser.add_argument(\\n+ deepspeed_args.add_argument(\\n \"--deepspeed_multinode_launcher\",\\n default=None,\\n type=str,\\n help=\"DeepSpeed multi-node launcher to use.\",\\n )\\n \\n- # fsdp args\\n- parser.add_argument(\\n- \"--use_fsdp\",\\n- default=False,\\n- action=\"store_true\",\\n- help=\"Whether to use fsdp.\",\\n- )\\n- parser.add_argument(\\n+ # fsdp arguments\\n+ fsdp_args = parser.add_argument_group(\"FSDP Arguments\", \"Arguments related to Fully Shared Data Parallelism.\")\\n+ fsdp_args.add_argument(\\n \"--fsdp_offload_params\",\\n default=\"false\",\\n type=str,\\n help=\"Decides Whether (true|false) to offload parameters and gradients to CPU. (useful only when `use_fsdp` flag is passed).\",\\n )\\n- parser.add_argument(\\n+ fsdp_args.add_argument(\\n \"--fsdp_min_num_params\",\\n type=int,\\n default=1e8,\\n help=\"FSDP\\'s minimum number of parameters for Default Auto Wrapping. (useful only when `use_fsdp` flag is passed).\",\\n )\\n- parser.add_argument(\\n+ fsdp_args.add_argument(\\n \"--fsdp_sharding_strategy\",\\n type=int,\\n default=1,\\n help=\"FSDP\\'s Sharding Strategy. (useful only when `use_fsdp` flag is passed).\",\\n )\\n- parser.add_argument(\\n+ fsdp_args.add_argument(\\n \"--fsdp_auto_wrap_policy\",\\n type=str,\\n default=None,\\n help=\"FSDP\\'s auto wrap policy. (useful only when `use_fsdp` flag is passed).\",\\n )\\n- parser.add_argument(\\n+ fsdp_args.add_argument(\\n \"--fsdp_transformer_layer_cls_to_wrap\",\\n default=None,\\n type=str,\\n help=\"Transformer layer class name (case-sensitive) to wrap ,e.g, `BertLayer`, `GPTJBlock`, `T5Block` .... \"\\n \"(useful only when `use_fsdp` flag is passed).\",\\n )\\n- parser.add_argument(\\n+ fsdp_args.add_argument(\\n \"--fsdp_backward_prefetch_policy\",\\n default=None,\\n type=str,\\n help=\"FSDP\\'s backward prefetch policy. (useful only when `use_fsdp` flag is passed).\",\\n )\\n- parser.add_argument(\\n+ fsdp_args.add_argument(\\n \"--fsdp_state_dict_type\",\\n default=None,\\n type=str,\\n help=\"FSDP\\'s state dict type. (useful only when `use_fsdp` flag is passed).\",\\n )\\n- parser.add_argument(\\n+ fsdp_args.add_argument(\\n \"--offload_params\",\\n default=None,\\n type=str,\\n help=\"This argument is deprecated. Use `fsdp_offload_params` instead.\",\\n )\\n- parser.add_argument(\\n+ fsdp_args.add_argument(\\n \"--min_num_params\",\\n type=int,\\n default=None,\\n help=\"This argument is deprecated. Use `fsdp_min_num_params` instead.\",\\n )\\n- parser.add_argument(\\n+ fsdp_args.add_argument(\\n \"--sharding_strategy\",\\n type=int,\\n default=None,\\n help=\"This argument is deprecated. 
Use `fsdp_sharding_strategy` instead.\",\\n )\\n- parser.add_argument(\\n+ fsdp_args.add_argument(\\n \"--transformer_layer_cls_to_wrap\",\\n default=None,\\n type=str,\\n@@ -242,45 +429,40 @@ def launch_command_parser(subparsers=None):\\n )\\n \\n # megatron_lm args\\n- parser.add_argument(\\n- \"--use_megatron_lm\",\\n- default=False,\\n- action=\"store_true\",\\n- help=\"Whether to use Megatron-LM.\",\\n- )\\n- parser.add_argument(\\n+ megatron_lm_args = parser.add_argument_group(\"Megatron-LM Arguments\", \"Arguments related to Megatron-LM.\")\\n+ megatron_lm_args.add_argument(\\n \"--megatron_lm_tp_degree\",\\n type=int,\\n default=1,\\n help=\"Megatron-LM\\'s Tensor Parallelism (TP) degree. (useful only when `use_megatron_lm` flag is passed).\",\\n )\\n- parser.add_argument(\\n+ megatron_lm_args.add_argument(\\n \"--megatron_lm_pp_degree\",\\n type=int,\\n default=1,\\n help=\"Megatron-LM\\'s Pipeline Parallelism (PP) degree. (useful only when `use_megatron_lm` flag is passed).\",\\n )\\n- parser.add_argument(\\n+ megatron_lm_args.add_argument(\\n \"--megatron_lm_num_micro_batches\",\\n type=int,\\n default=None,\\n help=\"Megatron-LM\\'s number of micro batches when PP degree > 1. (useful only when `use_megatron_lm` flag is passed).\",\\n )\\n- parser.add_argument(\\n+ megatron_lm_args.add_argument(\\n \"--megatron_lm_sequence_parallelism\",\\n default=None,\\n type=str,\\n help=\"Decides Whether (true|false) to enable Sequence Parallelism when TP degree > 1. \"\\n \"(useful only when `use_megatron_lm` flag is passed).\",\\n )\\n- parser.add_argument(\\n+ megatron_lm_args.add_argument(\\n \"--megatron_lm_recompute_activations\",\\n default=None,\\n type=str,\\n help=\"Decides Whether (true|false) to enable Selective Activation Recomputation. \"\\n \"(useful only when `use_megatron_lm` flag is passed).\",\\n )\\n- parser.add_argument(\\n+ megatron_lm_args.add_argument(\\n \"--megatron_lm_use_distributed_optimizer\",\\n default=None,\\n type=str,\\n@@ -288,7 +470,7 @@ def launch_command_parser(subparsers=None):\\n \"which shards optimizer state and gradients across Data Pralellel (DP) ranks. \"\\n \"(useful only when `use_megatron_lm` flag is passed).\",\\n )\\n- parser.add_argument(\\n+ megatron_lm_args.add_argument(\\n \"--megatron_lm_gradient_clipping\",\\n default=1.0,\\n type=float,\\n@@ -296,105 +478,15 @@ def launch_command_parser(subparsers=None):\\n \"(useful only when `use_megatron_lm` flag is passed).\",\\n )\\n \\n- parser.add_argument(\\n- \"--tpu\", default=False, action=\"store_true\", help=\"Whether or not this should launch a TPU training.\"\\n- )\\n- parser.add_argument(\\n- \"--mixed_precision\",\\n- type=str,\\n- choices=[\"no\", \"fp16\", \"bf16\"],\\n- help=\"Whether or not to use mixed precision training. \"\\n- \"Choose between FP16 and BF16 (bfloat16) training. 
\"\\n- \"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\",\\n- )\\n-\\n- parser.add_argument(\\n- \"--fp16\", default=False, action=\"store_true\", help=\"Whether or not to use mixed precision training.\"\\n- )\\n- parser.add_argument(\\n- \"--cpu\", default=False, action=\"store_true\", help=\"Whether or not to force the training on the CPU.\"\\n- )\\n- parser.add_argument(\\n- \"--num_processes\", type=int, default=None, help=\"The total number of processes to be launched in parallel.\"\\n- )\\n- parser.add_argument(\\n- \"--num_machines\", type=int, default=None, help=\"The total number of machines used in this training.\"\\n- )\\n- parser.add_argument(\\n- \"--gpu_ids\",\\n- default=None,\\n- help=\"What GPUs (by id) should be used for training on this machine as a comma-seperated list\",\\n- )\\n- parser.add_argument(\\n- \"--same_network\",\\n- default=False,\\n- action=\"store_true\",\\n- help=\"Whether all machines used for multinode training exist on the same local network.\",\\n- )\\n- parser.add_argument(\\n- \"--machine_rank\", type=int, default=None, help=\"The rank of the machine on which this script is launched.\"\\n- )\\n- parser.add_argument(\"--main_process_ip\", type=str, default=None, help=\"The IP address of the machine of rank 0.\")\\n- parser.add_argument(\\n- \"--main_process_port\",\\n- type=int,\\n- default=None,\\n- help=\"The port to use to communicate with the machine of rank 0.\",\\n- )\\n- # Rendezvous related arguments\\n- parser.add_argument(\\n- \"--rdzv_conf\",\\n- type=str,\\n- default=\"\",\\n- help=\"Additional rendezvous configuration (=,=,...).\",\\n- )\\n- parser.add_argument(\\n- \"--max_restarts\",\\n- type=int,\\n- default=0,\\n- help=\"Maximum number of worker group restarts before failing.\",\\n- )\\n- parser.add_argument(\\n- \"--monitor_interval\",\\n- type=float,\\n- default=5,\\n- help=\"Interval, in seconds, to monitor the state of workers.\",\\n- )\\n- parser.add_argument(\\n- \"--main_training_function\",\\n- type=str,\\n- default=None,\\n- help=\"The name of the main function to be executed in your script (only for TPU training).\",\\n- )\\n- parser.add_argument(\\n- \"--downcast_bf16\",\\n- action=\"store_true\",\\n- help=\"Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.\",\\n- )\\n- parser.add_argument(\\n- \"-m\",\\n- \"--module\",\\n- action=\"store_true\",\\n- help=\"Change each process to interpret the launch script as a Python module, executing with the same behavior as \\'python -m\\'.\",\\n- )\\n- parser.add_argument(\\n- \"--no_python\",\\n- action=\"store_true\",\\n- help=\"Skip prepending the training script with \\'python\\' - just execute it directly. Useful when the script is not a Python script.\",\\n- )\\n- parser.add_argument(\\n- \"--num_cpu_threads_per_process\",\\n- type=int,\\n- default=None,\\n- help=\"The number of CPU threads per process. 
Can be tuned for optimal performance.\",\\n- )\\n- parser.add_argument(\\n+ # AWS arguments\\n+ aws_args = parser.add_argument_group(\"AWS Arguments\", \"Arguments related to AWS.\")\\n+ aws_args.add_argument(\\n \"--aws_access_key_id\",\\n type=str,\\n default=None,\\n help=\"The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job\",\\n )\\n- parser.add_argument(\\n+ aws_args.add_argument(\\n \"--aws_secret_access_key\",\\n type=str,\\n default=None,\\n',\n", - " 'code_comments': [{'body': 'I hate this underscore convention in Python as it hurts readability and nothing is ever truly private. In Accelerate, like for Transformers, the convention is that things that are not in the main init are private. So no need to add all those _ :-)',\n", - " 'diff_hunk': '@@ -61,340 +61,430 @@\\n \\n logger = logging.getLogger(__name__)\\n \\n+_options_to_group = {\\n+ \"--multi-gpu\": \"Distributed GPUs\",\\n+ \"--tpu\": \"TPU\",\\n+ \"--use_mps_device\": \"MPS\",\\n+ \"--use_deepspeed\": \"DeepSpeed\",\\n+ \"--use_fsdp\": \"FSDP\",\\n+ \"--use_megatron_lm\": \"Megatron-LM\",\\n+}\\n+\\n+\\n+def _clean_option(option):',\n", - " 'from_author': False},\n", - " {'body': 'Made public! :) Will note down for next time',\n", - " 'diff_hunk': '@@ -61,340 +61,430 @@\\n \\n logger = logging.getLogger(__name__)\\n \\n+_options_to_group = {\\n+ \"--multi-gpu\": \"Distributed GPUs\",\\n+ \"--tpu\": \"TPU\",\\n+ \"--use_mps_device\": \"MPS\",\\n+ \"--use_deepspeed\": \"DeepSpeed\",\\n+ \"--use_fsdp\": \"FSDP\",\\n+ \"--use_megatron_lm\": \"Megatron-LM\",\\n+}\\n+\\n+\\n+def _clean_option(option):',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/810'},\n", - " 1105761867: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex 81759962c..287e6899a 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -647,7 +647,15 @@ def _prepare_fsdp(self, *args):\\n \"FSDP Warning: When using FSDP, several parameter groups will be conflated into \"\\n \"a single one due to nested module wrapping and parameter flattening.\"\\n )\\n- optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)\\n+ try:\\n+ optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)\\n+ except TypeError:\\n+ if \"differentiable\" in obj.optimizer.defaults:\\n+ # https://github.com/huggingface/accelerate/issues/801\\n+ defaults = {k: v for k, v in obj.optimizer.defaults.items() if k != \"differentiable\"}\\n+ optimizer = obj.optimizer.__class__(model.parameters(), **defaults)\\n+ else:\\n+ raise\\n obj = self.prepare_optimizer(optimizer)\\n optimizers.append(obj)\\n elif isinstance(obj, torch.nn.Module):\\n',\n", - " 'code_comments': [{'body': 'This will delete inplace, we should probably create a new dict here',\n", - " 'diff_hunk': '@@ -647,7 +647,15 @@ def _prepare_fsdp(self, *args):\\n \"FSDP Warning: When using FSDP, several parameter groups will be conflated into \"\\n \"a single one due to nested module wrapping and parameter flattening.\"\\n )\\n- optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)\\n+ try:\\n+ optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)\\n+ except TypeError:\\n+ if \"differentiable\" in obj.optimizer.defaults:\\n+ # 
https://github.com/huggingface/accelerate/issues/801\\n+ del obj.optimizer.defaults[\"differentiable\"]',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\n defaults = obj.optimizer.defaults.copy()\\r\\n del defaults[\"differentiable\"]\\r\\n optimizer = obj.optimizer.__class__(model.parameters(), **defaults)\\r\\n```',\n", - " 'diff_hunk': '@@ -647,7 +647,15 @@ def _prepare_fsdp(self, *args):\\n \"FSDP Warning: When using FSDP, several parameter groups will be conflated into \"\\n \"a single one due to nested module wrapping and parameter flattening.\"\\n )\\n- optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)\\n+ try:\\n+ optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)\\n+ except TypeError:\\n+ if \"differentiable\" in obj.optimizer.defaults:\\n+ # https://github.com/huggingface/accelerate/issues/801\\n+ del obj.optimizer.defaults[\"differentiable\"]\\n+ optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)',\n", - " 'from_author': True},\n", - " {'body': 'Good point. Done. PTAL.',\n", - " 'diff_hunk': '@@ -647,7 +647,15 @@ def _prepare_fsdp(self, *args):\\n \"FSDP Warning: When using FSDP, several parameter groups will be conflated into \"\\n \"a single one due to nested module wrapping and parameter flattening.\"\\n )\\n- optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)\\n+ try:\\n+ optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)\\n+ except TypeError:\\n+ if \"differentiable\" in obj.optimizer.defaults:\\n+ # https://github.com/huggingface/accelerate/issues/801\\n+ del obj.optimizer.defaults[\"differentiable\"]',\n", - " 'from_author': True},\n", - " {'body': '```suggestion\\r\\n defaults = {k: v for k, v in obj.optimizer.defaults.items() if k != \"differentiable\"}\\r\\n```',\n", - " 'diff_hunk': '@@ -647,7 +647,16 @@ def _prepare_fsdp(self, *args):\\n \"FSDP Warning: When using FSDP, several parameter groups will be conflated into \"\\n \"a single one due to nested module wrapping and parameter flattening.\"\\n )\\n- optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)\\n+ try:\\n+ optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)\\n+ except TypeError:\\n+ if \"differentiable\" in obj.optimizer.defaults:\\n+ # https://github.com/huggingface/accelerate/issues/801\\n+ defaults = obj.optimizer.defaults.copy()\\n+ del defaults[\"differentiable\"]',\n", - " 'from_author': False}],\n", - " 'context': [{'body': 'closing #801', 'from_author': True},\n", - " {'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'Thank you for the quick fix! 
🤗', 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/803'},\n", - " 1105709188: {'diff': 'diff --git a/src/accelerate/commands/accelerate_cli.py b/src/accelerate/commands/accelerate_cli.py\\nindex 6300c7c1d..515a66d1a 100644\\n--- a/src/accelerate/commands/accelerate_cli.py\\n+++ b/src/accelerate/commands/accelerate_cli.py\\n@@ -19,6 +19,7 @@\\n from accelerate.commands.config import config_command_parser\\n from accelerate.commands.env import env_command_parser\\n from accelerate.commands.launch import launch_command_parser\\n+from accelerate.commands.pod import pod_command_parser\\n from accelerate.commands.test import test_command_parser\\n \\n \\n@@ -28,9 +29,10 @@ def main():\\n \\n # Register commands\\n config_command_parser(subparsers=subparsers)\\n+ env_command_parser(subparsers=subparsers)\\n launch_command_parser(subparsers=subparsers)\\n+ pod_command_parser(subparsers=subparsers)\\n test_command_parser(subparsers=subparsers)\\n- env_command_parser(subparsers=subparsers)\\n \\n # Let\\'s go\\n args = parser.parse_args()\\ndiff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\\nindex 0fa3ceab2..acc4a1828 100644\\n--- a/src/accelerate/commands/config/cluster.py\\n+++ b/src/accelerate/commands/config/cluster.py\\n@@ -14,6 +14,8 @@\\n # See the License for the specific language governing permissions and\\n # limitations under the License.\\n \\n+import os\\n+\\n from ...utils import ComputeEnvironment, DistributedType, is_deepspeed_available, is_transformers_available\\n from ...utils.constants import (\\n DEEPSPEED_MULTINODE_LAUNCHERS,\\n@@ -41,6 +43,10 @@ def get_cluster_input():\\n main_process_port = None\\n rdzv_backend = \"static\"\\n same_network = True\\n+ tpu_name = None\\n+ tpu_zone = None\\n+ commands = None\\n+ command_file = None\\n if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_CPU]:\\n num_machines = _ask_field(\\n \"How many different machines will you use (use more than 1 for multi-node training)? [1]: \",\\n@@ -341,6 +347,50 @@ def get_cluster_input():\\n \"What is the name of the function in your script that should be launched in all parallel scripts? [main]: \",\\n default=\"main\",\\n )\\n+ use_cluster = _ask_field(\\n+ \"Are you using a TPU cluster? [yes/NO]: \",\\n+ _convert_yes_no_to_bool,\\n+ default=False,\\n+ error_message=\"Please enter yes or no.\",\\n+ )\\n+ if use_cluster:\\n+ tpu_name = _ask_field(\\n+ \"What is the name of your TPU cluster? \",\\n+ default=None,\\n+ error_message=\"Please enter the name of your TPU cluster.\",\\n+ )\\n+ tpu_zone = _ask_field(\\n+ \"What is the zone of your TPU cluster? \",\\n+ default=None,\\n+ error_message=\"Please enter the zone of your TPU cluster.\",\\n+ )\\n+ run_commands = _ask_field(\\n+ \"Do you have code you wish to run on startup in each pod? [yes/NO]: \",\\n+ _convert_yes_no_to_bool,\\n+ default=False,\\n+ error_message=\"Please enter yes or no.\",\\n+ )\\n+ if run_commands:\\n+ use_command_file = _ask_field(\\n+ \"Is this code located in a bash script? [yes/NO]: \",\\n+ _convert_yes_no_to_bool,\\n+ default=False,\\n+ error_message=\"Please enter yes or no.\",\\n+ )\\n+ if use_command_file:\\n+ command_file = _ask_field(\\n+ \"What is the path to your bash script? 
\",\\n+ default=None,\\n+ error_message=\"Please enter the path to your bash script.\",\\n+ )\\n+ command_file = os.path.abspath(command_file)\\n+ else:\\n+ commands = _ask_field(\\n+ \"What commands do you wish to run on startup in each pod? \",\\n+ default=None,\\n+ error_message=\"Please enter the commands you wish to run on startup in each pod as a single string.\",\\n+ )\\n+\\n else:\\n main_training_function = \"main\"\\n \\n@@ -408,4 +458,8 @@ def get_cluster_input():\\n use_cpu=use_cpu,\\n rdzv_backend=rdzv_backend,\\n same_network=same_network,\\n+ tpu_name=tpu_name,\\n+ tpu_zone=tpu_zone,\\n+ commands=commands,\\n+ command_file=command_file,\\n )\\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\\nindex 61f585858..9a1247c55 100644\\n--- a/src/accelerate/commands/config/config_args.py\\n+++ b/src/accelerate/commands/config/config_args.py\\n@@ -18,7 +18,7 @@\\n import os\\n from dataclasses import dataclass\\n from enum import Enum\\n-from typing import Optional, Union\\n+from typing import List, Optional, Union\\n \\n import yaml\\n \\n@@ -151,6 +151,12 @@ class ClusterConfig(BaseConfig):\\n # args for TPU\\n downcast_bf16: bool = False\\n \\n+ # args for TPU pods\\n+ tpu_name: str = None\\n+ tpu_zone: str = None\\n+ command_file: str = None\\n+ command: List[str] = None\\n+\\n def __post_init__(self):\\n if self.deepspeed_config is None:\\n self.deepspeed_config = {}\\ndiff --git a/src/accelerate/commands/env.py b/src/accelerate/commands/env.py\\nindex b66008e1b..a19c04d4a 100644\\n--- a/src/accelerate/commands/env.py\\n+++ b/src/accelerate/commands/env.py\\n@@ -1,3 +1,19 @@\\n+#!/usr/bin/env python\\n+\\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n import argparse\\n import os\\n import platform\\ndiff --git a/src/accelerate/commands/pod.py b/src/accelerate/commands/pod.py\\nnew file mode 100644\\nindex 000000000..87cd4f904\\n--- /dev/null\\n+++ b/src/accelerate/commands/pod.py\\n@@ -0,0 +1,152 @@\\n+#!/usr/bin/env python\\n+\\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+import argparse\\n+import os\\n+import subprocess\\n+\\n+from accelerate.commands.config.config_args import default_config_file, load_config_from_file\\n+from packaging.version import Version, parse\\n+\\n+\\n+_description = \"Run commands across a pod of TPU VMs for initial setup before running `accelerate launch`. Will also install Accelerate on the pod.\"\\n+\\n+\\n+def pod_command_parser(subparsers=None):\\n+ if subparsers is not None:\\n+ parser = subparsers.add_parser(\"pod-config\", description=_description)\\n+ else:\\n+ parser = argparse.ArgumentParser(\"Accelerate pod-config command\", description=_description)\\n+\\n+ parser.add_argument(\\n+ \"--config_file\",\\n+ type=str,\\n+ default=None,\\n+ help=\"Path to the config file to use for accelerate.\",\\n+ )\\n+\\n+ parser.add_argument(\\n+ \"--pod_config_file\",\\n+ type=str,\\n+ default=None,\\n+ help=\"Path to the config file to use for the pod.\",\\n+ )\\n+\\n+ parser.add_argument(\\n+ \"--command_file\",\\n+ default=None,\\n+ help=\"The path to the file containing the commands to run on the pod on startup.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--command\",\\n+ action=\"append\",\\n+ nargs=\"+\",\\n+ help=\"A command to run on the pod. If not specified, will use the command specified in the command file.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--tpu_name\",\\n+ default=None,\\n+ help=\"The name of the TPU to use. If not specified, will use the TPU specified in the config file.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--tpu_zone\",\\n+ default=None,\\n+ help=\"The zone of the TPU to use. If not specified, will use the zone specified in the config file.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--install_accelerate\",\\n+ action=\"store_true\",\\n+ help=\"Whether to install accelerate on the pod. Defaults to False.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--accelerate_version\",\\n+ default=\"latest\",\\n+ help=\"The version of accelerate to install on the pod. If not specified, will use the latest pypi version. 
Specify \\'dev\\' to install from GitHub.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--debug\", action=\"store_true\", help=\"If set, will print the command that would be run instead of running it.\"\\n+ )\\n+\\n+ if subparsers is not None:\\n+ parser.set_defaults(func=pod_launcher)\\n+ return parser\\n+\\n+\\n+def pod_launcher(args):\\n+ defaults = None\\n+\\n+ # Get the default from the config file if it exists.\\n+ if args.config_file is not None or os.path.isfile(default_config_file):\\n+ defaults = load_config_from_file(args.config_file)\\n+ if not args.command_file and defaults.command_file is not None and not args.command:\\n+ args.command_file = defaults.command_file\\n+ if not args.command and defaults.command is not None:\\n+ args.command = defaults.command\\n+ if not args.tpu_name:\\n+ args.tpu_name = defaults.tpu_name\\n+ if not args.tpu_zone:\\n+ args.tpu_zone = defaults.tpu_zone\\n+ if args.accelerate_version == \"dev\":\\n+ args.accelerate_version = \"git+https://github.com/huggingface/accelerate.git\"\\n+ elif args.accelerate_version == \"latest\":\\n+ args.accelerate_version = \"accelerate -U\"\\n+ elif isinstance(parse(args.accelerate_version), Version):\\n+ args.accelerate_version = f\"accelerate=={args.accelerate_version}\"\\n+\\n+ if not args.command_file and not args.command:\\n+ raise ValueError(\"You must specify either a command file or a command to run on the pod.\")\\n+\\n+ if args.command_file:\\n+ with open(args.command_file, \"r\") as f:\\n+ args.command = [f.read().splitlines()]\\n+\\n+ # To turn list of lists into list of strings\\n+ args.command = [line for cmd in args.command for line in cmd]\\n+ # Default to the shared folder and install accelerate\\n+ new_cmd = [\"cd /usr/share\"]\\n+ if args.install_accelerate:\\n+ new_cmd += [f\"pip install {args.accelerate_version}\"]\\n+ new_cmd += args.command\\n+ args.command = \"; \".join(new_cmd)\\n+\\n+ # Then send it to gcloud\\n+ # Eventually try to use google-api-core to do this instead of subprocess\\n+ cmd = [\\n+ \"gcloud\",\\n+ \"compute\",\\n+ \"tpus\",\\n+ \"tpu-vm\",\\n+ \"ssh\",\\n+ args.tpu_name,\\n+ \"--zone\",\\n+ args.tpu_zone,\\n+ \"--command\",\\n+ args.command,\\n+ \"--worker\",\\n+ \"all\",\\n+ ]\\n+ if args.debug:\\n+ print(f\"Running {\\' \\'.join(cmd)}\")\\n+ return\\n+ subprocess.run(cmd)\\n+ print(\"Successfully setup pod.\")\\n+\\n+\\n+def main():\\n+ parser = pod_command_parser()\\n+ args = parser.parse_args()\\n+\\n+ pod_launcher(args)\\ndiff --git a/tests/test_cli.py b/tests/test_cli.py\\nindex ceed58290..2479f736d 100644\\n--- a/tests/test_cli.py\\n+++ b/tests/test_cli.py\\n@@ -21,6 +21,7 @@\\n \\n import accelerate\\n from accelerate.test_utils import execute_subprocess_async\\n+from accelerate.test_utils.testing import run_command\\n \\n \\n class AccelerateLauncherTester(unittest.TestCase):\\n@@ -63,3 +64,151 @@ def test_config_compatibility(self):\\n execute_subprocess_async(\\n self.base_cmd + [\"--config_file\", str(config), self.test_file_path], env=os.environ.copy()\\n )\\n+\\n+\\n+class PodConfigTester(unittest.TestCase):\\n+ \"\"\"\\n+ Test case for verifying the `accelerate pod-config` CLI passes the right `gcloud` command.\\n+ \"\"\"\\n+\\n+ tpu_name = \"test-tpu\"\\n+ tpu_zone = \"us-central1-a\"\\n+ command = \"ls\"\\n+ cmd = [\"accelerate\", \"pod-config\"]\\n+ base_output = \"cd /usr/share\"\\n+ command_file = \"tests/test_samples/test_command_file.sh\"\\n+ gcloud = \"Running gcloud compute tpus tpu-vm ssh\"\\n+\\n+ @staticmethod\\n+ def clean_output(output):\\n+ return 
\"\".join(output).rstrip()\\n+\\n+ def test_base(self):\\n+ output = run_command(\\n+ self.cmd\\n+ + [\"--command\", self.command, \"--tpu_zone\", self.tpu_zone, \"--tpu_name\", self.tpu_name, \"--debug\"],\\n+ return_stdout=True,\\n+ )\\n+ self.assertEqual(\\n+ self.clean_output(output),\\n+ f\"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all\",\\n+ )\\n+\\n+ def test_base_backward_compatibility(self):\\n+ output = run_command(\\n+ self.cmd\\n+ + [\\n+ \"--config_file\",\\n+ \"tests/test_configs/0_12_0.yaml\",\\n+ \"--command\",\\n+ self.command,\\n+ \"--tpu_zone\",\\n+ self.tpu_zone,\\n+ \"--tpu_name\",\\n+ self.tpu_name,\\n+ \"--debug\",\\n+ ],\\n+ return_stdout=True,\\n+ )\\n+ self.assertEqual(\\n+ self.clean_output(output),\\n+ f\"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all\",\\n+ )\\n+\\n+ def test_with_config_file(self):\\n+ output = run_command(\\n+ self.cmd + [\"--config_file\", \"tests/test_configs/latest.yaml\", \"--debug\"], return_stdout=True\\n+ )\\n+ self.assertEqual(\\n+ self.clean_output(output),\\n+ f\\'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all\\',\\n+ )\\n+\\n+ def test_with_config_file_and_command(self):\\n+ output = run_command(\\n+ self.cmd + [\"--config_file\", \"tests/test_configs/latest.yaml\", \"--command\", self.command, \"--debug\"],\\n+ return_stdout=True,\\n+ )\\n+ self.assertEqual(\\n+ self.clean_output(output),\\n+ f\"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all\",\\n+ )\\n+\\n+ def test_with_config_file_and_multiple_command(self):\\n+ output = run_command(\\n+ self.cmd\\n+ + [\\n+ \"--config_file\",\\n+ \"tests/test_configs/latest.yaml\",\\n+ \"--command\",\\n+ self.command,\\n+ \"--command\",\\n+ \\'echo \"Hello World\"\\',\\n+ \"--debug\",\\n+ ],\\n+ return_stdout=True,\\n+ )\\n+ self.assertEqual(\\n+ self.clean_output(output),\\n+ f\\'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all\\',\\n+ )\\n+\\n+ def test_with_config_file_and_command_file(self):\\n+ output = run_command(\\n+ self.cmd\\n+ + [\"--config_file\", \"tests/test_configs/latest.yaml\", \"--command_file\", self.command_file, \"--debug\"],\\n+ return_stdout=True,\\n+ )\\n+ self.assertEqual(\\n+ self.clean_output(output),\\n+ f\\'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all\\',\\n+ )\\n+\\n+ def test_with_config_file_and_command_file_backward_compatibility(self):\\n+ output = run_command(\\n+ self.cmd\\n+ + [\\n+ \"--config_file\",\\n+ \"tests/test_configs/0_12_0.yaml\",\\n+ \"--command_file\",\\n+ self.command_file,\\n+ \"--tpu_zone\",\\n+ self.tpu_zone,\\n+ \"--tpu_name\",\\n+ self.tpu_name,\\n+ \"--debug\",\\n+ ],\\n+ return_stdout=True,\\n+ )\\n+ self.assertEqual(\\n+ self.clean_output(output),\\n+ f\\'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all\\',\\n+ )\\n+\\n+ def test_accelerate_install(self):\\n+ output = run_command(\\n+ self.cmd + [\"--config_file\", \"tests/test_configs/latest.yaml\", \"--install_accelerate\", \"--debug\"],\\n+ return_stdout=True,\\n+ )\\n+ self.assertEqual(\\n+ self.clean_output(output),\\n+ f\\'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install 
accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all\\',\\n+ )\\n+\\n+ def test_accelerate_install_version(self):\\n+ output = run_command(\\n+ self.cmd\\n+ + [\\n+ \"--config_file\",\\n+ \"tests/test_configs/latest.yaml\",\\n+ \"--install_accelerate\",\\n+ \"--accelerate_version\",\\n+ \"12.0.0\",\\n+ \"--debug\",\\n+ ],\\n+ return_stdout=True,\\n+ )\\n+ self.assertEqual(\\n+ self.clean_output(output),\\n+ f\\'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all\\',\\n+ )\\ndiff --git a/tests/test_configs/latest.yaml b/tests/test_configs/latest.yaml\\nindex 87b294cf8..de6be03a4 100644\\n--- a/tests/test_configs/latest.yaml\\n+++ b/tests/test_configs/latest.yaml\\n@@ -15,3 +15,7 @@ num_processes: 1\\n rdzv_backend: static\\n same_network: true\\n use_cpu: false\\n+tpu_name: \\'test-tpu\\'\\n+tpu_zone: \\'us-central1-a\\'\\n+command: null\\n+command_file: tests/test_samples/test_command_file.sh\\n\\\\ No newline at end of file\\ndiff --git a/tests/test_samples/test_command_file.sh b/tests/test_samples/test_command_file.sh\\nnew file mode 100644\\nindex 000000000..592a7d532\\n--- /dev/null\\n+++ b/tests/test_samples/test_command_file.sh\\n@@ -0,0 +1,2 @@\\n+echo \"hello world\"\\n+echo \"this is a second command\"\\n\\\\ No newline at end of file\\n',\n", - " 'code_comments': [{'body': '```suggestion\\r\\n \"Do you have code you wish to run on startup in each pod? [yes/NO]: \",\\r\\n```',\n", - " 'diff_hunk': '@@ -341,6 +347,50 @@ def get_cluster_input():\\n \"What is the name of the function in your script that should be launched in all parallel scripts? [main]: \",\\n default=\"main\",\\n )\\n+ use_cluster = _ask_field(\\n+ \"Are you using a TPU cluster? [yes/NO]: \",\\n+ _convert_yes_no_to_bool,\\n+ default=False,\\n+ error_message=\"Please enter yes or no.\",\\n+ )\\n+ if use_cluster:\\n+ tpu_name = _ask_field(\\n+ \"What is the name of your TPU cluster? \",\\n+ default=None,\\n+ error_message=\"Please enter the name of your TPU cluster.\",\\n+ )\\n+ tpu_zone = _ask_field(\\n+ \"What is the zone of your TPU cluster? \",\\n+ default=None,\\n+ error_message=\"Please enter the zone of your TPU cluster.\",\\n+ )\\n+ run_commands = _ask_field(\\n+ \"Do you have code you wish to be ran on startup in each pod? [yes/NO]: \",',\n", - " 'from_author': False},\n", - " {'body': 'We can allow for mix and match by appending commands from the `args.command_file` to the `args.command`? Would it be better than raising error?',\n", - " 'diff_hunk': '@@ -0,0 +1,152 @@\\n+#!/usr/bin/env python\\n+\\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+import argparse\\n+import os\\n+import subprocess\\n+\\n+from accelerate.commands.config.config_args import default_config_file, load_config_from_file\\n+from packaging.version import Version, parse\\n+\\n+\\n+_description = \"Run commands across a pod of TPU VMs for initial setup before running `accelerate launch`. Will also install Accelerate on the pod.\"\\n+\\n+\\n+def pod_command_parser(subparsers=None):\\n+ if subparsers is not None:\\n+ parser = subparsers.add_parser(\"pod-config\", description=_description)\\n+ else:\\n+ parser = argparse.ArgumentParser(\"Accelerate pod-config command\", description=_description)\\n+\\n+ parser.add_argument(\\n+ \"--config_file\",\\n+ type=str,\\n+ default=None,\\n+ help=\"Path to the config file to use for accelerate.\",\\n+ )\\n+\\n+ parser.add_argument(\\n+ \"--pod_config_file\",\\n+ type=str,\\n+ default=None,\\n+ help=\"Path to the config file to use for the pod.\",\\n+ )\\n+\\n+ parser.add_argument(\\n+ \"--command_file\",\\n+ default=None,\\n+ help=\"The path to the file containing the commands to run on the pod on startup.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--command\",\\n+ action=\"append\",\\n+ nargs=\"+\",\\n+ help=\"A command to run on the pod. If not specified, will use the command specified in the command file.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--tpu_name\",\\n+ default=None,\\n+ help=\"The name of the TPU to use. If not specified, will use the TPU specified in the config file.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--tpu_zone\",\\n+ default=None,\\n+ help=\"The zone of the TPU to use. If not specified, will use the zone specified in the config file.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--install_accelerate\",\\n+ action=\"store_true\",\\n+ help=\"Whether to install accelerate on the pod. Defaults to False.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--accelerate_version\",\\n+ default=\"latest\",\\n+ help=\"The version of accelerate to install on the pod. If not specified, will use the latest pypi version. 
Specify \\'dev\\' to install from GitHub.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--debug\", action=\"store_true\", help=\"If set, will print the command that would be run instead of running it.\"\\n+ )\\n+\\n+ if subparsers is not None:\\n+ parser.set_defaults(func=pod_launcher)\\n+ return parser\\n+\\n+\\n+def pod_launcher(args):\\n+ defaults = None\\n+\\n+ # Get the default from the config file if it exists.\\n+ if args.config_file is not None or os.path.isfile(default_config_file):\\n+ defaults = load_config_from_file(args.config_file)\\n+ if not args.command_file and defaults.command_file is not None and not args.command:\\n+ args.command_file = defaults.command_file\\n+ if not args.command and defaults.command is not None:\\n+ args.command = defaults.command\\n+ if not args.tpu_name:\\n+ args.tpu_name = defaults.tpu_name\\n+ if not args.tpu_zone:\\n+ args.tpu_zone = defaults.tpu_zone\\n+ if args.accelerate_version == \"dev\":\\n+ args.accelerate_version = \"git+https://github.com/huggingface/accelerate.git\"\\n+ elif args.accelerate_version == \"latest\":\\n+ args.accelerate_version = \"accelerate -U\"\\n+ elif isinstance(parse(args.accelerate_version), Version):\\n+ args.accelerate_version = f\"accelerate=={args.accelerate_version}\"\\n+\\n+ if not args.command_file and not args.command:\\n+ raise ValueError(\"You must specify either a command file or a command to run on the pod.\")',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\n print(f\"Running {\\' \\'.join(cmd)}\")\\r\\n```',\n", - " 'diff_hunk': '@@ -0,0 +1,152 @@\\n+#!/usr/bin/env python\\n+\\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+import argparse\\n+import os\\n+import subprocess\\n+\\n+from accelerate.commands.config.config_args import default_config_file, load_config_from_file\\n+from packaging.version import Version, parse\\n+\\n+\\n+_description = \"Run commands across a pod of TPU VMs for initial setup before running `accelerate launch`. Will also install Accelerate on the pod.\"\\n+\\n+\\n+def pod_command_parser(subparsers=None):\\n+ if subparsers is not None:\\n+ parser = subparsers.add_parser(\"pod-config\", description=_description)\\n+ else:\\n+ parser = argparse.ArgumentParser(\"Accelerate pod-config command\", description=_description)\\n+\\n+ parser.add_argument(\\n+ \"--config_file\",\\n+ type=str,\\n+ default=None,\\n+ help=\"Path to the config file to use for accelerate.\",\\n+ )\\n+\\n+ parser.add_argument(\\n+ \"--pod_config_file\",\\n+ type=str,\\n+ default=None,\\n+ help=\"Path to the config file to use for the pod.\",\\n+ )\\n+\\n+ parser.add_argument(\\n+ \"--command_file\",\\n+ default=None,\\n+ help=\"The path to the file containing the commands to run on the pod on startup.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--command\",\\n+ action=\"append\",\\n+ nargs=\"+\",\\n+ help=\"A command to run on the pod. 
If not specified, will use the command specified in the command file.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--tpu_name\",\\n+ default=None,\\n+ help=\"The name of the TPU to use. If not specified, will use the TPU specified in the config file.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--tpu_zone\",\\n+ default=None,\\n+ help=\"The zone of the TPU to use. If not specified, will use the zone specified in the config file.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--install_accelerate\",\\n+ action=\"store_true\",\\n+ help=\"Whether to install accelerate on the pod. Defaults to False.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--accelerate_version\",\\n+ default=\"latest\",\\n+ help=\"The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \\'dev\\' to install from GitHub.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--debug\", action=\"store_true\", help=\"If set, will print the command that would be run instead of running it.\"\\n+ )\\n+\\n+ if subparsers is not None:\\n+ parser.set_defaults(func=pod_launcher)\\n+ return parser\\n+\\n+\\n+def pod_launcher(args):\\n+ defaults = None\\n+\\n+ # Get the default from the config file if it exists.\\n+ if args.config_file is not None or os.path.isfile(default_config_file):\\n+ defaults = load_config_from_file(args.config_file)\\n+ if not args.command_file and defaults.command_file is not None and not args.command:\\n+ args.command_file = defaults.command_file\\n+ if not args.command and defaults.command is not None:\\n+ args.command = defaults.command\\n+ if not args.tpu_name:\\n+ args.tpu_name = defaults.tpu_name\\n+ if not args.tpu_zone:\\n+ args.tpu_zone = defaults.tpu_zone\\n+ if args.accelerate_version == \"dev\":\\n+ args.accelerate_version = \"git+https://github.com/huggingface/accelerate.git\"\\n+ elif args.accelerate_version == \"latest\":\\n+ args.accelerate_version = \"accelerate -U\"\\n+ elif isinstance(parse(args.accelerate_version), Version):\\n+ args.accelerate_version = f\"accelerate=={args.accelerate_version}\"\\n+\\n+ if not args.command_file and not args.command:\\n+ raise ValueError(\"You must specify either a command file or a command to run on the pod.\")\\n+\\n+ if args.command_file:\\n+ with open(args.command_file, \"r\") as f:\\n+ args.command = [f.read().splitlines()]\\n+\\n+ # To turn list of lists into list of strings\\n+ args.command = [line for cmd in args.command for line in cmd]\\n+ # Default to the shared folder and install accelerate\\n+ new_cmd = [\"cd /usr/share\"]\\n+ if args.install_accelerate:\\n+ new_cmd += [f\"pip install {args.accelerate_version}\"]\\n+ new_cmd += args.command\\n+ args.command = \"; \".join(new_cmd)\\n+\\n+ # Then send it to gcloud\\n+ # Eventually try to use google-api-core to do this instead of subprocess\\n+ cmd = [\\n+ \"gcloud\",\\n+ \"compute\",\\n+ \"tpus\",\\n+ \"tpu-vm\",\\n+ \"ssh\",\\n+ args.tpu_name,\\n+ \"--zone\",\\n+ args.tpu_zone,\\n+ \"--command\",\\n+ args.command,\\n+ \"--worker\",\\n+ \"all\",\\n+ ]\\n+ if args.debug:\\n+ print(cmd)',\n", - " 'from_author': False},\n", - " {'body': \"That's a test class which is inherently private, thus we don't need to add all those `_` that hurt readability :-) \",\n", - " 'diff_hunk': '@@ -63,3 +65,155 @@ def test_config_compatibility(self):\\n execute_subprocess_async(\\n self.base_cmd + [\"--config_file\", str(config), self.test_file_path], env=os.environ.copy()\\n )\\n+\\n+\\n+class PodConfigTester(unittest.TestCase):\\n+ \"\"\"\\n+ Test case for verifying the `accelerate pod-config` CLI 
passes the right `gcloud` command.\\n+ \"\"\"\\n+\\n+ _tpu_name = \"test-tpu\"\\n+ _tpu_zone = \"us-central1-a\"\\n+ _command = \"ls\"\\n+ cmd = [\"accelerate\", \"pod-config\"]\\n+ _base_output = \"cd /usr/share\"\\n+ _command_file = \"tests/test_samples/test_command_file.sh\"',\n", - " 'from_author': False},\n", - " {'body': \"With only allowing one vs the other it keeps the API simplistic, as otherwise we then have to worry about when in the order should the command come from in the CLI vs the bash script and that can be confusing to users. However in this particular case the check is just to ensure that you've passed *some* command to run in :) \",\n", - " 'diff_hunk': '@@ -0,0 +1,152 @@\\n+#!/usr/bin/env python\\n+\\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+import argparse\\n+import os\\n+import subprocess\\n+\\n+from accelerate.commands.config.config_args import default_config_file, load_config_from_file\\n+from packaging.version import Version, parse\\n+\\n+\\n+_description = \"Run commands across a pod of TPU VMs for initial setup before running `accelerate launch`. Will also install Accelerate on the pod.\"\\n+\\n+\\n+def pod_command_parser(subparsers=None):\\n+ if subparsers is not None:\\n+ parser = subparsers.add_parser(\"pod-config\", description=_description)\\n+ else:\\n+ parser = argparse.ArgumentParser(\"Accelerate pod-config command\", description=_description)\\n+\\n+ parser.add_argument(\\n+ \"--config_file\",\\n+ type=str,\\n+ default=None,\\n+ help=\"Path to the config file to use for accelerate.\",\\n+ )\\n+\\n+ parser.add_argument(\\n+ \"--pod_config_file\",\\n+ type=str,\\n+ default=None,\\n+ help=\"Path to the config file to use for the pod.\",\\n+ )\\n+\\n+ parser.add_argument(\\n+ \"--command_file\",\\n+ default=None,\\n+ help=\"The path to the file containing the commands to run on the pod on startup.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--command\",\\n+ action=\"append\",\\n+ nargs=\"+\",\\n+ help=\"A command to run on the pod. If not specified, will use the command specified in the command file.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--tpu_name\",\\n+ default=None,\\n+ help=\"The name of the TPU to use. If not specified, will use the TPU specified in the config file.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--tpu_zone\",\\n+ default=None,\\n+ help=\"The zone of the TPU to use. If not specified, will use the zone specified in the config file.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--install_accelerate\",\\n+ action=\"store_true\",\\n+ help=\"Whether to install accelerate on the pod. Defaults to False.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--accelerate_version\",\\n+ default=\"latest\",\\n+ help=\"The version of accelerate to install on the pod. If not specified, will use the latest pypi version. 
Specify \\'dev\\' to install from GitHub.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--debug\", action=\"store_true\", help=\"If set, will print the command that would be run instead of running it.\"\\n+ )\\n+\\n+ if subparsers is not None:\\n+ parser.set_defaults(func=pod_launcher)\\n+ return parser\\n+\\n+\\n+def pod_launcher(args):\\n+ defaults = None\\n+\\n+ # Get the default from the config file if it exists.\\n+ if args.config_file is not None or os.path.isfile(default_config_file):\\n+ defaults = load_config_from_file(args.config_file)\\n+ if not args.command_file and defaults.command_file is not None and not args.command:\\n+ args.command_file = defaults.command_file\\n+ if not args.command and defaults.command is not None:\\n+ args.command = defaults.command\\n+ if not args.tpu_name:\\n+ args.tpu_name = defaults.tpu_name\\n+ if not args.tpu_zone:\\n+ args.tpu_zone = defaults.tpu_zone\\n+ if args.accelerate_version == \"dev\":\\n+ args.accelerate_version = \"git+https://github.com/huggingface/accelerate.git\"\\n+ elif args.accelerate_version == \"latest\":\\n+ args.accelerate_version = \"accelerate -U\"\\n+ elif isinstance(parse(args.accelerate_version), Version):\\n+ args.accelerate_version = f\"accelerate=={args.accelerate_version}\"\\n+\\n+ if not args.command_file and not args.command:\\n+ raise ValueError(\"You must specify either a command file or a command to run on the pod.\")',\n", - " 'from_author': True},\n", - " {'body': 'We can keep this idea open though and if users want that we can enable it',\n", - " 'diff_hunk': '@@ -0,0 +1,152 @@\\n+#!/usr/bin/env python\\n+\\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+import argparse\\n+import os\\n+import subprocess\\n+\\n+from accelerate.commands.config.config_args import default_config_file, load_config_from_file\\n+from packaging.version import Version, parse\\n+\\n+\\n+_description = \"Run commands across a pod of TPU VMs for initial setup before running `accelerate launch`. Will also install Accelerate on the pod.\"\\n+\\n+\\n+def pod_command_parser(subparsers=None):\\n+ if subparsers is not None:\\n+ parser = subparsers.add_parser(\"pod-config\", description=_description)\\n+ else:\\n+ parser = argparse.ArgumentParser(\"Accelerate pod-config command\", description=_description)\\n+\\n+ parser.add_argument(\\n+ \"--config_file\",\\n+ type=str,\\n+ default=None,\\n+ help=\"Path to the config file to use for accelerate.\",\\n+ )\\n+\\n+ parser.add_argument(\\n+ \"--pod_config_file\",\\n+ type=str,\\n+ default=None,\\n+ help=\"Path to the config file to use for the pod.\",\\n+ )\\n+\\n+ parser.add_argument(\\n+ \"--command_file\",\\n+ default=None,\\n+ help=\"The path to the file containing the commands to run on the pod on startup.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--command\",\\n+ action=\"append\",\\n+ nargs=\"+\",\\n+ help=\"A command to run on the pod. 
If not specified, will use the command specified in the command file.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--tpu_name\",\\n+ default=None,\\n+ help=\"The name of the TPU to use. If not specified, will use the TPU specified in the config file.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--tpu_zone\",\\n+ default=None,\\n+ help=\"The zone of the TPU to use. If not specified, will use the zone specified in the config file.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--install_accelerate\",\\n+ action=\"store_true\",\\n+ help=\"Whether to install accelerate on the pod. Defaults to False.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--accelerate_version\",\\n+ default=\"latest\",\\n+ help=\"The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \\'dev\\' to install from GitHub.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--debug\", action=\"store_true\", help=\"If set, will print the command that would be run instead of running it.\"\\n+ )\\n+\\n+ if subparsers is not None:\\n+ parser.set_defaults(func=pod_launcher)\\n+ return parser\\n+\\n+\\n+def pod_launcher(args):\\n+ defaults = None\\n+\\n+ # Get the default from the config file if it exists.\\n+ if args.config_file is not None or os.path.isfile(default_config_file):\\n+ defaults = load_config_from_file(args.config_file)\\n+ if not args.command_file and defaults.command_file is not None and not args.command:\\n+ args.command_file = defaults.command_file\\n+ if not args.command and defaults.command is not None:\\n+ args.command = defaults.command\\n+ if not args.tpu_name:\\n+ args.tpu_name = defaults.tpu_name\\n+ if not args.tpu_zone:\\n+ args.tpu_zone = defaults.tpu_zone\\n+ if args.accelerate_version == \"dev\":\\n+ args.accelerate_version = \"git+https://github.com/huggingface/accelerate.git\"\\n+ elif args.accelerate_version == \"latest\":\\n+ args.accelerate_version = \"accelerate -U\"\\n+ elif isinstance(parse(args.accelerate_version), Version):\\n+ args.accelerate_version = f\"accelerate=={args.accelerate_version}\"\\n+\\n+ if not args.command_file and not args.command:\\n+ raise ValueError(\"You must specify either a command file or a command to run on the pod.\")',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/802'},\n", - " 1105644322: {'diff': 'diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex b9f6c1089..7c8534e93 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -592,6 +592,12 @@ def deepspeed_launcher(args):\\n num_machines = getattr(args, \"num_machines\")\\n main_process_ip = getattr(args, \"main_process_ip\")\\n main_process_port = getattr(args, \"main_process_port\")\\n+\\n+ # make sure launcher is not None\\n+ if args.deepspeed_multinode_launcher is None:\\n+ # set to default pdsh\\n+ setattr(args, \"deepspeed_multinode_launcher\", DEEPSPEED_MULTINODE_LAUNCHERS[0])\\n+\\n if num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:\\n cmd = [\"deepspeed\", \"--no_local_rank\"]\\n cmd.extend([\"--hostfile\", str(args.deepspeed_hostfile), \"--launcher\", str(args.deepspeed_multinode_launcher)])\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", 
- " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/800'},\n", - " 1103202807: {'diff': 'diff --git a/src/accelerate/big_modeling.py b/src/accelerate/big_modeling.py\\nindex 83c6738af..e277e89a2 100644\\n--- a/src/accelerate/big_modeling.py\\n+++ b/src/accelerate/big_modeling.py\\n@@ -245,11 +245,15 @@ def dispatch_model(\\n check_device_map(model, device_map)\\n \\n if main_device is None:\\n- main_device = [d for d in device_map.values() if d not in [\"cpu\", \"disk\"]][0]\\n+ if set(device_map.values()) == {\"cpu\"} or set(device_map.values()) == {\"cpu\", \"disk\"}:\\n+ main_device = \"cpu\"\\n+ else:\\n+ main_device = [d for d in device_map.values() if d not in [\"cpu\", \"disk\"]][0]\\n \\n- cpu_modules = [name for name, device in device_map.items() if device == \"cpu\"]\\n- if state_dict is None and len(cpu_modules) > 0:\\n- state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules)\\n+ if main_device != \"cpu\":\\n+ cpu_modules = [name for name, device in device_map.items() if device == \"cpu\"]\\n+ if state_dict is None and len(cpu_modules) > 0:\\n+ state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules)\\n \\n disk_modules = [name for name, device in device_map.items() if device == \"disk\"]\\n if offload_dir is None and len(disk_modules) > 0:\\n@@ -266,7 +270,8 @@ def dispatch_model(\\n execution_device = {\\n name: main_device if device in [\"cpu\", \"disk\"] else device for name, device in device_map.items()\\n }\\n- offload = {name: device in [\"cpu\", \"disk\"] for name, device in device_map.items()}\\n+ offloaded_devices = [\"disk\"] if main_device == \"cpu\" else [\"cpu\", \"disk\"]\\n+ offload = {name: device in offloaded_devices for name, device in device_map.items()}\\n save_folder = offload_dir if len(disk_modules) > 0 else None\\n if state_dict is not None or save_folder is not None:\\n weights_map = OffloadedWeightsLoader(state_dict=state_dict, save_folder=save_folder)\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': \"Is this expected to work on the `mps` device too? I've checked #596, but the only device I see here is `cpu`:\\r\\nhttps://github.com/huggingface/accelerate/blob/0de16441266ba9a799aa338aa75c73bf078429e9/src/accelerate/utils/modeling.py#L464\\r\\n\\r\\nFurthermore, `get_max_memory` doesn't seem to take it into account: https://github.com/huggingface/accelerate/blob/b816e258a95904736c8e3a5a2b14a18d5f407f4a/src/accelerate/utils/modeling.py#L275-L284\\r\\n\\r\\nAlternatively, is it possible to override using a custom `device_map`?\\r\\n\\r\\nReference: https://github.com/huggingface/diffusers/pull/1042\",\n", - " 'from_author': False},\n", - " {'body': 'No we don\\'t support the \"mps\" device at all in big model inference. `device_map=\"auto\"` will load the model on the CPU and then the user can\\'t move them to the MPS device if they want.',\n", - " 'from_author': True},\n", - " {'body': 'Ok understood, thanks! However, we get errors after moving to the MPS device, if we use `device_map=\"auto\"`. I\\'m not sure what\\'s causing it, but some tensors are being moved to the CPU when performing inference. I\\'ll investigate further.',\n", - " 'from_author': False},\n", - " {'body': 'That\\'s the side effect of using `device_map=\"auto\"` @Narsil was warning you about. 
It does more than just load the model with empty weights but adds hooks to the model.',\n", - " 'from_author': True},\n", - " {'body': 'Yeah I saw the hooks while stepping with the debugger. One of the reasons why we want to use this is for easier communication (just use `accelerate` and `device_map=\"auto\"`). We\\'ll see what we can do :)',\n", - " 'from_author': False},\n", - " {'body': '@pcuenca Loading weights efficiently is like 2 functions from `accelerate`.\\r\\n\\r\\nhttps://github.com/huggingface/accelerate/blob/main/src/accelerate/big_modeling.py#L35-L110\\r\\n\\r\\nAnd loading after the fact https://github.com/huggingface/transformers_bloom_parallel/blob/main/generate.py#L211-L270\\r\\n(Couldn\\'t fetch rapidly from `accelerate` but it has some more complete and robust function).\\r\\n\\r\\nTo be completely honest, I feel like all this should be in Pytorch proper. the `meta` device is really a bad workaround.\\r\\n\\r\\nIn the meantime maybe instead of pushing towards `device_map=\"auto\"` you could instead use the 2 accelerate functions and wrapping them up nicely in `from_pretrained` ?',\n", - " 'from_author': False},\n", - " {'body': '> In the meantime maybe instead of pushing towards `device_map=\"auto\"` you could instead use the 2 accelerate functions and wrapping them up nicely in `from_pretrained` ?\\r\\n\\r\\nThat\\'s what I was thinking, yes! What I\\'m not sure about is whether to apply those only for the `mps` device (and keep using `device_map` in the others), or something else. I\\'ll do some experiments to see what it looks like in practice. Thanks a lot!',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/797'},\n", - " 1102193335: {'diff': 'diff --git a/src/accelerate/utils/offload.py b/src/accelerate/utils/offload.py\\nindex 750ff9d57..bc0fcdef1 100644\\n--- a/src/accelerate/utils/offload.py\\n+++ b/src/accelerate/utils/offload.py\\n@@ -178,5 +178,13 @@ def extract_submodules_state_dict(state_dict: Dict[str, torch.Tensor], submodule\\n \"\"\"\\n result = {}\\n for module_name in submodule_names:\\n- result.update({key: param for key, param in state_dict.items() if key.startswith(module_name)})\\n+ # We want to catch module_name parameter (module_name.xxx) or potentially module_name, but not any of the\\n+ # submodules that could being like module_name (transformers.h.1 and transformers.h.10 for instance)\\n+ result.update(\\n+ {\\n+ key: param\\n+ for key, param in state_dict.items()\\n+ if key == module_name or key.startswith(module_name + \".\")\\n+ }\\n+ )\\n return result\\ndiff --git a/tests/test_offload.py b/tests/test_offload.py\\nindex 765a9685d..a9ebd36ee 100644\\n--- a/tests/test_offload.py\\n+++ b/tests/test_offload.py\\n@@ -21,6 +21,7 @@\\n \\n from accelerate.utils import (\\n OffloadedWeightsLoader,\\n+ extract_submodules_state_dict,\\n is_torch_version,\\n load_offloaded_weight,\\n offload_state_dict,\\n@@ -105,3 +106,12 @@ def test_offload_weights_loader(self):\\n self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))\\n for key, param in state_dict.items():\\n self.assertTrue(torch.allclose(param, weight_map[key]))\\n+\\n+ def test_extract_submodules_state_dict(self):\\n+ state_dict = {\"a.1\": 0, \"a.10\": 1, \"a.2\": 2}\\n+ extracted = extract_submodules_state_dict(state_dict, [\"a.1\", \"a.2\"])\\n+ self.assertDictEqual(extracted, {\"a.1\": 0, \"a.2\": 2})\\n+\\n+ state_dict = {\"a.1.a\": 0, \"a.10.a\": 1, \"a.2.a\": 2}\\n+ extracted = extract_submodules_state_dict(state_dict, 
[\"a.1\", \"a.2\"])\\n+ self.assertDictEqual(extracted, {\"a.1.a\": 0, \"a.2.a\": 2})\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/795'},\n", - " 1101114625: {'diff': 'diff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\\nindex a438ab45b..3a31fed93 100644\\n--- a/src/accelerate/tracking.py\\n+++ b/src/accelerate/tracking.py\\n@@ -15,15 +15,23 @@\\n # Expectation:\\n # Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}\\n \\n+import json\\n import os\\n import time\\n from abc import ABCMeta, abstractmethod, abstractproperty\\n-from typing import List, Optional, Union\\n+from typing import Any, Dict, List, Optional, Union\\n \\n import yaml\\n \\n from .logging import get_logger\\n-from .utils import LoggerType, is_aim_available, is_comet_ml_available, is_tensorboard_available, is_wandb_available\\n+from .utils import (\\n+ LoggerType,\\n+ is_aim_available,\\n+ is_comet_ml_available,\\n+ is_mlflow_available,\\n+ is_tensorboard_available,\\n+ is_wandb_available,\\n+)\\n \\n \\n _available_trackers = []\\n@@ -48,6 +56,10 @@\\n \\n _available_trackers.append(LoggerType.AIM)\\n \\n+if is_mlflow_available():\\n+ import mlflow\\n+\\n+ _available_trackers.append(LoggerType.MLFLOW)\\n \\n logger = get_logger(__name__)\\n \\n@@ -398,9 +410,140 @@ def finish(self):\\n self.writer.close()\\n \\n \\n+class MLflowTracker(GeneralTracker):\\n+ \"\"\"\\n+ A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script.\\n+\\n+ Args:\\n+ experiment_name (`str`, *optional*):\\n+ Name of the experiment. Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument.\\n+ logging_dir (`str` or `os.PathLike`, defaults to `\".\"`):\\n+ Location for mlflow logs to be stored.\\n+ run_id (`str`, *optional*):\\n+ If specified, get the run with the specified UUID and log parameters and metrics under that run. The run’s\\n+ end time is unset and its status is set to running, but the run’s other attributes (source_version,\\n+ source_type, etc.) are not changed. Environment variable MLFLOW_RUN_ID has priority over this argument.\\n+ tags (`Dict[str, str]`, *optional*):\\n+ An optional `dict` of `str` keys and values, or a `str` dump from a `dict`, to set as tags on the run. If a\\n+ run is being resumed, these tags are set on the resumed run. If a new run is being created, these tags are\\n+ set on the new run. Environment variable MLFLOW_TAGS has priority over this argument.\\n+ nested_run (`bool`, *optional*, defaults to `False`):\\n+ Controls whether run is nested in parent run. True creates a nested run. Environment variable\\n+ MLFLOW_NESTED_RUN has priority over this argument.\\n+ run_name (`str`, *optional*):\\n+ Name of new run (stored as a mlflow.runName tag). Used only when `run_id` is unspecified.\\n+ description (`str`, *optional*):\\n+ An optional string that populates the description box of the run. If a run is being resumed, the\\n+ description is set on the resumed run. 
If a new run is being created, the description is set on the new\\n+ run.\\n+ \"\"\"\\n+\\n+ name = \"mlflow\"\\n+ requires_logging_directory = True\\n+\\n+ def __init__(\\n+ self,\\n+ experiment_name: str = None,\\n+ logging_dir: Optional[Union[str, os.PathLike]] = \".\",\\n+ run_id: Optional[str] = None,\\n+ tags: Optional[Union[Dict[str, Any], str]] = None,\\n+ nested_run: Optional[bool] = False,\\n+ run_name: Optional[str] = None,\\n+ description: Optional[str] = None,\\n+ ):\\n+\\n+ experiment_name = os.getenv(\"MLFLOW_EXPERIMENT_NAME\", experiment_name)\\n+ run_id = os.getenv(\"MLFLOW_RUN_ID\", run_id)\\n+ tags = os.getenv(\"MLFLOW_TAGS\", tags)\\n+ if isinstance(tags, str):\\n+ tags = json.loads(tags)\\n+\\n+ nested_run = os.getenv(\"MLFLOW_NESTED_RUN\", nested_run)\\n+\\n+ experiment_id = mlflow.create_experiment(\\n+ name=experiment_name,\\n+ artifact_location=logging_dir,\\n+ tags=tags,\\n+ )\\n+\\n+ self.active_run = mlflow.start_run(\\n+ run_id=run_id,\\n+ experiment_id=experiment_id,\\n+ run_name=run_name,\\n+ nested=nested_run,\\n+ tags=tags,\\n+ description=description,\\n+ )\\n+\\n+ logger.debug(f\"Initialized mlflow experiment {experiment_name}\")\\n+ logger.debug(\\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\\n+ )\\n+\\n+ @property\\n+ def tracker(self):\\n+ return self.active_run\\n+\\n+ def store_init_configuration(self, values: dict):\\n+ \"\"\"\\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\\n+\\n+ Args:\\n+ values (`dict`):\\n+ Values to be stored as initial hyperparameters as key-value pairs.\\n+ \"\"\"\\n+\\n+ for name, value in list(values.items()):\\n+ # internally, all values are converted to str in MLflow\\n+ if len(str(value)) > mlflow.utils.validation.MAX_PARAM_VAL_LENGTH:\\n+ logger.warning(\\n+ f\\'Trainer is attempting to log a value of \"{value}\" for key \"{name}\" as a parameter. MLflow\\\\\\'s\\'\\n+ f\" log_param() only accepts values no longer than {mlflow.utils.validation.MAX_PARAM_VAL_LENGTH} characters so we dropped this attribute.\"\\n+ )\\n+ del values[name]\\n+\\n+ values_list = list(values.items())\\n+\\n+ # MLflow cannot log more than 100 values in one go, so we have to split it\\n+ for i in range(0, len(values_list), mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH):\\n+ mlflow.log_params(dict(values_list[i : i + mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH]))\\n+\\n+ logger.debug(\"Stored initial configuration hyperparameters to MLflow\")\\n+\\n+ def log(self, values: dict, step: Optional[int]):\\n+ \"\"\"\\n+ Logs `values` to the current run.\\n+\\n+ Args:\\n+ values (`dict`):\\n+ Values to be logged as key-value pairs.\\n+ step (`int`, *optional*):\\n+ The run step. If included, the log will be affiliated with this step.\\n+ \"\"\"\\n+ metrics = {}\\n+ for k, v in values.items():\\n+ if isinstance(v, (int, float)):\\n+ metrics[k] = v\\n+ else:\\n+ logger.warning(\\n+ f\\'MLflowTracker is attempting to log a value of \"{v}\" of type {type(v)} for key \"{k}\" as a metric. 
\\'\\n+ \"MLflow\\'s log_metric() only accepts float and int types so we dropped this attribute.\"\\n+ )\\n+\\n+ mlflow.log_metrics(metrics, step=step)\\n+ logger.debug(\"Successfully logged to mlflow\")\\n+\\n+ def finish(self):\\n+ \"\"\"\\n+ End the active MLflow run.\\n+ \"\"\"\\n+ mlflow.end_run()\\n+\\n+\\n LOGGER_TYPE_TO_CLASS = {\\n \"aim\": AimTracker,\\n \"comet_ml\": CometMLTracker,\\n+ \"mlflow\": MLflowTracker,\\n \"tensorboard\": TensorBoardTracker,\\n \"wandb\": WandBTracker,\\n }\\n@@ -424,6 +567,7 @@ def filter_trackers(\\n - `\"tensorboard\"`\\n - `\"wandb\"`\\n - `\"comet_ml\"`\\n+ - `\"mlflow\"`\\n If `\"all\"` is selected, will pick up all available trackers in the environment and initialize them. Can\\n also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `\"all\"`.\\n logging_dir (`str`, `os.PathLike`, *optional*):\\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\\nindex b22518f9a..a017a2ad5 100644\\n--- a/src/accelerate/utils/__init__.py\\n+++ b/src/accelerate/utils/__init__.py\\n@@ -31,6 +31,7 @@\\n is_datasets_available,\\n is_deepspeed_available,\\n is_megatron_lm_available,\\n+ is_mlflow_available,\\n is_rich_available,\\n is_sagemaker_available,\\n is_tensorboard_available,\\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\\nindex 8227fa5bc..7dd8798e9 100644\\n--- a/src/accelerate/utils/dataclasses.py\\n+++ b/src/accelerate/utils/dataclasses.py\\n@@ -203,6 +203,7 @@ class LoggerType(BaseEnum):\\n TENSORBOARD = \"tensorboard\"\\n WANDB = \"wandb\"\\n COMETML = \"comet_ml\"\\n+ MLFLOW = \"mlflow\"\\n \\n \\n class PrecisionType(BaseEnum):\\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\\nindex c33258a92..892ff63f8 100644\\n--- a/src/accelerate/utils/imports.py\\n+++ b/src/accelerate/utils/imports.py\\n@@ -136,3 +136,7 @@ def is_sagemaker_available():\\n \\n def is_tqdm_available():\\n return importlib.util.find_spec(\"tqdm\") is not None\\n+\\n+\\n+def is_mlflow_available():\\n+ return importlib.util.find_spec(\"mlflow\") is not None\\n',\n", - " 'code_comments': [{'body': '```suggestion\\r\\n tags (`Dict[str, str]`, *optional*):\\r\\n```',\n", - " 'diff_hunk': '@@ -398,9 +410,143 @@ def finish(self):\\n self.writer.close()\\n \\n \\n+class MLflowTracker(GeneralTracker):\\n+ \"\"\"\\n+ A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script.\\n+\\n+ Args:\\n+ experiment_name (`str`):\\n+ Name of the experiment. Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument.\\n+ logging_dir (`str`, `os.PathLike`):\\n+ Location for mlflow logs to be stored.\\n+ run_id (`str`):\\n+ If specified, get the run with the specified UUID and log parameters and metrics under that run. The run’s\\n+ end time is unset and its status is set to running, but the run’s other attributes (source_version,\\n+ source_type, etc.) are not changed. Environment variable MLFLOW_RUN_ID has priority over this argument.\\n+ tags (`dict`, `str`):',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\n run_id (`str`, *optional*):\\r\\n```',\n", - " 'diff_hunk': '@@ -398,9 +410,143 @@ def finish(self):\\n self.writer.close()\\n \\n \\n+class MLflowTracker(GeneralTracker):\\n+ \"\"\"\\n+ A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script.\\n+\\n+ Args:\\n+ experiment_name (`str`):\\n+ Name of the experiment. 
Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument.\\n+ logging_dir (`str`, `os.PathLike`):\\n+ Location for mlflow logs to be stored.\\n+ run_id (`str`):',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\n logging_dir (`str` or `os.PathLike`, defaults to `\".\"`):\\r\\n```',\n", - " 'diff_hunk': '@@ -398,9 +410,143 @@ def finish(self):\\n self.writer.close()\\n \\n \\n+class MLflowTracker(GeneralTracker):\\n+ \"\"\"\\n+ A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script.\\n+\\n+ Args:\\n+ experiment_name (`str`):\\n+ Name of the experiment. Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument.\\n+ logging_dir (`str`, `os.PathLike`):',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\n experiment_name (`str`, *optional*):\\r\\n```',\n", - " 'diff_hunk': '@@ -398,9 +410,143 @@ def finish(self):\\n self.writer.close()\\n \\n \\n+class MLflowTracker(GeneralTracker):\\n+ \"\"\"\\n+ A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script.\\n+\\n+ Args:\\n+ experiment_name (`str`):',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\n nested_run (`bool`, *optional*, defaults to `False`):\\r\\n```',\n", - " 'diff_hunk': '@@ -398,9 +410,143 @@ def finish(self):\\n self.writer.close()\\n \\n \\n+class MLflowTracker(GeneralTracker):\\n+ \"\"\"\\n+ A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script.\\n+\\n+ Args:\\n+ experiment_name (`str`):\\n+ Name of the experiment. Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument.\\n+ logging_dir (`str`, `os.PathLike`):\\n+ Location for mlflow logs to be stored.\\n+ run_id (`str`):\\n+ If specified, get the run with the specified UUID and log parameters and metrics under that run. The run’s\\n+ end time is unset and its status is set to running, but the run’s other attributes (source_version,\\n+ source_type, etc.) are not changed. Environment variable MLFLOW_RUN_ID has priority over this argument.\\n+ tags (`dict`, `str`):\\n+ An optional `dict` of `str` keys and values, or a `str` dump from a `dict`, to set as tags on the run. If a\\n+ run is being resumed, these tags are set on the resumed run. If a new run is being created, these tags are\\n+ set on the new run. Environment variable MLFLOW_TAGS has priority over this argument.\\n+ nested_run (`bool`):',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\n run_name (`str`, *optional*):\\r\\n```',\n", - " 'diff_hunk': '@@ -398,9 +410,143 @@ def finish(self):\\n self.writer.close()\\n \\n \\n+class MLflowTracker(GeneralTracker):\\n+ \"\"\"\\n+ A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script.\\n+\\n+ Args:\\n+ experiment_name (`str`):\\n+ Name of the experiment. Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument.\\n+ logging_dir (`str`, `os.PathLike`):\\n+ Location for mlflow logs to be stored.\\n+ run_id (`str`):\\n+ If specified, get the run with the specified UUID and log parameters and metrics under that run. The run’s\\n+ end time is unset and its status is set to running, but the run’s other attributes (source_version,\\n+ source_type, etc.) are not changed. Environment variable MLFLOW_RUN_ID has priority over this argument.\\n+ tags (`dict`, `str`):\\n+ An optional `dict` of `str` keys and values, or a `str` dump from a `dict`, to set as tags on the run. 
If a\\n+ run is being resumed, these tags are set on the resumed run. If a new run is being created, these tags are\\n+ set on the new run. Environment variable MLFLOW_TAGS has priority over this argument.\\n+ nested_run (`bool`):\\n+ Controls whether run is nested in parent run. True creates a nested run. Environment variable\\n+ MLFLOW_NESTED_RUN has priority over this argument.\\n+ run_name (`str`):',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\n Name of new run (stored as a mlflow.runName tag). Used only when `run_id` is unspecified.\\r\\n```',\n", - " 'diff_hunk': '@@ -398,9 +410,143 @@ def finish(self):\\n self.writer.close()\\n \\n \\n+class MLflowTracker(GeneralTracker):\\n+ \"\"\"\\n+ A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script.\\n+\\n+ Args:\\n+ experiment_name (`str`):\\n+ Name of the experiment. Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument.\\n+ logging_dir (`str`, `os.PathLike`):\\n+ Location for mlflow logs to be stored.\\n+ run_id (`str`):\\n+ If specified, get the run with the specified UUID and log parameters and metrics under that run. The run’s\\n+ end time is unset and its status is set to running, but the run’s other attributes (source_version,\\n+ source_type, etc.) are not changed. Environment variable MLFLOW_RUN_ID has priority over this argument.\\n+ tags (`dict`, `str`):\\n+ An optional `dict` of `str` keys and values, or a `str` dump from a `dict`, to set as tags on the run. If a\\n+ run is being resumed, these tags are set on the resumed run. If a new run is being created, these tags are\\n+ set on the new run. Environment variable MLFLOW_TAGS has priority over this argument.\\n+ nested_run (`bool`):\\n+ Controls whether run is nested in parent run. True creates a nested run. Environment variable\\n+ MLFLOW_NESTED_RUN has priority over this argument.\\n+ run_name (`str`):\\n+ Name of new run (stored as a mlflow.runName tag). Used only when run_id is unspecified.',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\n description (`str`, *optional*):\\r\\n```',\n", - " 'diff_hunk': '@@ -398,9 +410,143 @@ def finish(self):\\n self.writer.close()\\n \\n \\n+class MLflowTracker(GeneralTracker):\\n+ \"\"\"\\n+ A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script.\\n+\\n+ Args:\\n+ experiment_name (`str`):\\n+ Name of the experiment. Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument.\\n+ logging_dir (`str`, `os.PathLike`):\\n+ Location for mlflow logs to be stored.\\n+ run_id (`str`):\\n+ If specified, get the run with the specified UUID and log parameters and metrics under that run. The run’s\\n+ end time is unset and its status is set to running, but the run’s other attributes (source_version,\\n+ source_type, etc.) are not changed. Environment variable MLFLOW_RUN_ID has priority over this argument.\\n+ tags (`dict`, `str`):\\n+ An optional `dict` of `str` keys and values, or a `str` dump from a `dict`, to set as tags on the run. If a\\n+ run is being resumed, these tags are set on the resumed run. If a new run is being created, these tags are\\n+ set on the new run. Environment variable MLFLOW_TAGS has priority over this argument.\\n+ nested_run (`bool`):\\n+ Controls whether run is nested in parent run. True creates a nested run. Environment variable\\n+ MLFLOW_NESTED_RUN has priority over this argument.\\n+ run_name (`str`):\\n+ Name of new run (stored as a mlflow.runName tag). 
Used only when run_id is unspecified.\\n+ description (`str`):',\n", - " 'from_author': False},\n", - " {'body': \"Instead of storing those constants, let's use them directly when necessary (otherwise the reader has to figure out what those things are :-) 0\",\n", - " 'diff_hunk': '@@ -398,9 +410,143 @@ def finish(self):\\n self.writer.close()\\n \\n \\n+class MLflowTracker(GeneralTracker):\\n+ \"\"\"\\n+ A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script.\\n+\\n+ Args:\\n+ experiment_name (`str`):\\n+ Name of the experiment. Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument.\\n+ logging_dir (`str`, `os.PathLike`):\\n+ Location for mlflow logs to be stored.\\n+ run_id (`str`):\\n+ If specified, get the run with the specified UUID and log parameters and metrics under that run. The run’s\\n+ end time is unset and its status is set to running, but the run’s other attributes (source_version,\\n+ source_type, etc.) are not changed. Environment variable MLFLOW_RUN_ID has priority over this argument.\\n+ tags (`dict`, `str`):\\n+ An optional `dict` of `str` keys and values, or a `str` dump from a `dict`, to set as tags on the run. If a\\n+ run is being resumed, these tags are set on the resumed run. If a new run is being created, these tags are\\n+ set on the new run. Environment variable MLFLOW_TAGS has priority over this argument.\\n+ nested_run (`bool`):\\n+ Controls whether run is nested in parent run. True creates a nested run. Environment variable\\n+ MLFLOW_NESTED_RUN has priority over this argument.\\n+ run_name (`str`):\\n+ Name of new run (stored as a mlflow.runName tag). Used only when run_id is unspecified.\\n+ description (`str`):\\n+ An optional string that populates the description box of the run. If a run is being resumed, the\\n+ description is set on the resumed run. 
If a new run is being created, the description is set on the new\\n+ run.\\n+ \"\"\"\\n+\\n+ name = \"mlflow\"\\n+ requires_logging_directory = True\\n+\\n+ def __init__(\\n+ self,\\n+ experiment_name: str = None,\\n+ logging_dir: Optional[Union[str, os.PathLike]] = \".\",\\n+ run_id: Optional[str] = None,\\n+ tags: Optional[Union[Dict[str, Any], str]] = None,\\n+ nested_run: Optional[bool] = False,\\n+ run_name: Optional[str] = None,\\n+ description: Optional[str] = None,\\n+ ):\\n+\\n+ self._MAX_PARAM_VAL_LENGTH = mlflow.utils.validation.MAX_PARAM_VAL_LENGTH\\n+ self._MAX_PARAMS_TAGS_PER_BATCH = mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': '@sgugger, how is it now?', 'from_author': True}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/794'},\n", - " 1100987961: {'diff': 'diff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\\nindex c14833557..e471731a4 100644\\n--- a/src/accelerate/utils/modeling.py\\n+++ b/src/accelerate/utils/modeling.py\\n@@ -476,7 +476,11 @@ def infer_auto_device_map(\\n current_memory_used = 0\\n \\n # Direct submodules and parameters\\n- modules_to_treat = list(model.named_parameters(recurse=False)) + list(model.named_children())\\n+ modules_to_treat = (\\n+ list(model.named_parameters(recurse=False))\\n+ + list(model.named_children())\\n+ + list(model.named_buffers(recurse=False))\\n+ )\\n # Initialize maximum largest layer, to know which space to keep in memory\\n max_layer_size, max_layer_names = get_max_layer_size(modules_to_treat, module_sizes, no_split_module_classes)\\n \\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'So if I understood it correctly, if you have some modules such as `nn.BatchNorm` in your model (as it is done in the `accelerate` CI test), the buffers `running_mean` and `running_var` will not be stored inside `model._buffers` but in `model.named_buffers()`. That is why I had to \"filter out\" the buffers by considering only the ones that are inside `model._buffers` and `model.named_buffers()` \\r\\n\\r\\nHere is an example that I have quickly tried: \\r\\n```\\r\\nmodel = nn.Sequential(nn.Linear(1, 1), nn.BatchNorm1d(1), nn.Embedding(1, 1), nn.LayerNorm(1)) \\r\\nprint(list(model.named_buffers()))\\r\\n>>>[(\\'1.running_mean\\', tensor([0.])), (\\'1.running_var\\', tensor([1.])), (\\'1.num_batches_tracked\\', tensor(0))]\\r\\nprint(list(model._buffers))\\r\\n>>> []\\r\\nmodel.register_buffer(\"position_bias\", torch.ones(1))\\r\\nprint(list(model._buffers))\\r\\n>>> [\\'position_bias\\']\\r\\nprint(list(model.named_buffers()))\\r\\n>>> [(\\'position_bias\\', tensor([1.])), (\\'1.running_mean\\', tensor([0.])), (\\'1.running_var\\', tensor([1.])), (\\'1.num_batches_tracked\\', tensor(0))]\\r\\n```\\r\\n\\r\\n',\n", - " 'from_author': True},\n", - " {'body': \"I think in this case, it's just the difference between `named_buffers(recurse=True)` and `named_buffers(recurse=False)`. I'm not convinced this fix is the right fix, so would like to learn more what is failing.\",\n", - " 'from_author': False},\n", - " {'body': \"Ah yes I see, you're probably right here! 
Let me dig a bit more and get back to you here \",\n", - " 'from_author': True},\n", - " {'body': '@sgugger I might have more clue on what is failing \\r\\nI think that the problem comes from the fact that the `infer_auto_device_map` does take into account only modules and submodules, I have made a script below to better illustrate the problem\\r\\n\\r\\n```\\r\\nimport torch.nn as nn\\r\\nimport torch\\r\\nfrom accelerate.utils import infer_auto_device_map\\r\\nfrom accelerate.big_modeling import dispatch_model\\r\\n\\r\\nclass SubModule(nn.Module):\\r\\n def __init__(self):\\r\\n super().__init__()\\r\\n\\r\\n self.register_buffer(\"position_bias\", torch.ones(1, 1000))\\r\\n\\r\\nclass Model(nn.Module):\\r\\n def __init__(self, wrap_module=True):\\r\\n super().__init__()\\r\\n self.l1 = nn.Linear(1000, 1000)\\r\\n self.l2 = nn.Linear(1000, 1000)\\r\\n self.l3 = nn.Linear(1000, 1000)\\r\\n\\r\\n self.bn1 = nn.BatchNorm1d(1000)\\r\\n self.bn2 = nn.BatchNorm1d(1000)\\r\\n\\r\\n if wrap_module:\\r\\n self.position_bias = SubModule()\\r\\n else:\\r\\n self.register_buffer(\"position_bias\", torch.ones(1, 1000))\\r\\n\\r\\n# Test 1: wrapping with a module - this will pass\\r\\nmodel = Model()\\r\\ndevice_map = infer_auto_device_map(model, {0:\"10MB\", \"cpu\":\"100MB\"})\\r\\nmodel = dispatch_model(model, device_map)\\r\\n\\r\\n# Test 2: below will fail\\r\\nmodel = Model(wrap_module=False)\\r\\ndevice_map = infer_auto_device_map(model, {0:\"10MB\", \"cpu\":\"100MB\"})\\r\\nmodel = dispatch_model(model, device_map)\\r\\n```\\r\\nLet me know what do you think! \\r\\n\\r\\nI guess this failed for `BartPreTrainedModel` since the `position_bias` buffer is on the parent module itself',\n", - " 'from_author': True},\n", - " {'body': 'Ah, in this case it looks very much like the problem #747 fixed for top-level parameters, so the fix should be pretty similar here too!',\n", - " 'from_author': False},\n", - " {'body': 'The whole testing suite (including slow tests) is green! 🟢 Merging ! 
\\r\\n\\r\\n',\n", - " 'from_author': True}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/792'},\n", - " 1098848698: {'diff': 'diff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\\nindex cdae54af9..8fbc8758f 100644\\n--- a/src/accelerate/data_loader.py\\n+++ b/src/accelerate/data_loader.py\\n@@ -352,16 +352,16 @@ class DataLoaderShard(DataLoader):\\n - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.\\n \"\"\"\\n \\n- def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwargs):\\n+ def __init__(self, dataset, device=None, rng_types=None, synchronized_generator=None, **kwargs):\\n super().__init__(dataset, **kwargs)\\n self.device = device\\n self.rng_types = rng_types\\n- self.generator = generator\\n+ self.synchronized_generator = synchronized_generator\\n self.gradient_state = GradientState()\\n \\n def __iter__(self):\\n if self.rng_types is not None:\\n- synchronize_rng_states(self.rng_types, self.generator)\\n+ synchronize_rng_states(self.rng_types, self.synchronized_generator)\\n self.gradient_state._set_end_of_dataloader(False)\\n try:\\n length = getattr(self.dataset, \"total_dataset_length\", len(self.dataset))\\n@@ -650,12 +650,12 @@ def prepare_data_loader(\\n # Iterable dataset doesn\\'t like batch_sampler, but data_loader creates a default one for it\\n new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None\\n sampler_is_batch_sampler = False\\n- generator = getattr(dataloader, \"generator\", None)\\n+ synchronized_generator = None\\n # No change if no multiprocess\\n if (num_processes != 1 or state.distributed_type == DistributedType.MEGATRON_LM) and not dispatch_batches:\\n if isinstance(new_dataset, IterableDataset):\\n if getattr(dataloader.dataset, \"generator\", None) is not None:\\n- generator = dataloader.dataset.generator\\n+ synchronized_generator = dataloader.dataset.generator\\n new_dataset = IterableDatasetShard(\\n new_dataset,\\n batch_size=dataloader.batch_size,\\n@@ -674,8 +674,7 @@ def prepare_data_loader(\\n if hasattr(sampler, \"generator\"):\\n if sampler.generator is None:\\n sampler.generator = torch.Generator()\\n- generator = sampler.generator\\n- generator.manual_seed(int(torch.empty((), dtype=torch.int64).random_().item()))\\n+ synchronized_generator = sampler.generator\\n \\n batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler\\n new_batch_sampler = BatchSamplerShard(\\n@@ -692,10 +691,9 @@ def prepare_data_loader(\\n \"sampler\",\\n \"batch_sampler\",\\n \"drop_last\",\\n- \"generator\",\\n ]\\n \\n- if rng_types is not None and generator is None and \"generator\" in rng_types:\\n+ if rng_types is not None and synchronized_generator is None and \"generator\" in rng_types:\\n rng_types.remove(\"generator\")\\n \\n kwargs = {\\n@@ -710,6 +708,7 @@ def prepare_data_loader(\\n kwargs[\"batch_size\"] = dataloader.batch_size // num_processes if split_batches else dataloader.batch_size\\n \\n if dispatch_batches:\\n+ kwargs.pop(\"generator\")\\n dataloader = DataLoaderDispatcher(\\n new_dataset,\\n split_batches=split_batches,\\n@@ -722,9 +721,9 @@ def prepare_data_loader(\\n new_dataset,\\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\\n sampler=new_batch_sampler,\\n- batch_size=getattr(dataloader, \"batch_size\", _PYTORCH_DATALOADER_KWARGS[\"batch_size\"]),\\n+ batch_size=dataloader.batch_size,\\n 
rng_types=rng_types,\\n- generator=generator,\\n+ synchronized_generator=synchronized_generator,\\n **kwargs,\\n )\\n else:\\n@@ -733,7 +732,7 @@ def prepare_data_loader(\\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\\n batch_sampler=new_batch_sampler,\\n rng_types=rng_types,\\n- generator=generator,\\n+ synchronized_generator=synchronized_generator,\\n **kwargs,\\n )\\n \\n',\n", - " 'code_comments': [{'body': 'We should have an else branch here in case `self.rng_types` contains something different than `\"generator\"`, by sending `None` for the generator argument (since we have removed `\"generator\"` from the rng_types at this stage, if there is none available.',\n", - " 'diff_hunk': '@@ -361,7 +361,15 @@ def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwarg\\n \\n def __iter__(self):\\n if self.rng_types is not None:\\n- synchronize_rng_states(self.rng_types, self.generator)\\n+ batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler\\n+ sampler = (\\n+ batch_sampler.batch_sampler.sampler\\n+ if hasattr(batch_sampler, \"batch_sampler\")\\n+ else batch_sampler.sampler\\n+ )\\n+ if hasattr(sampler, \"generator\"):\\n+ generator = sampler.generator\\n+ synchronize_rng_states(self.rng_types, generator)\\n self.gradient_state._set_end_of_dataloader(False)',\n", - " 'from_author': False},\n", - " {'body': 'I wonder why `dataloader.sampler` can be `BatchSampler`?',\n", - " 'diff_hunk': '@@ -361,7 +361,15 @@ def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwarg\\n \\n def __iter__(self):\\n if self.rng_types is not None:\\n- synchronize_rng_states(self.rng_types, self.generator)\\n+ batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler',\n", - " 'from_author': False},\n", - " {'body': 'See the example script above in the PR, it has various scenarios. ',\n", - " 'diff_hunk': '@@ -361,7 +361,15 @@ def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwarg\\n \\n def __iter__(self):\\n if self.rng_types is not None:\\n- synchronize_rng_states(self.rng_types, self.generator)\\n+ batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler',\n", - " 'from_author': True},\n", - " {'body': 'see https://github.com/huggingface/accelerate/issues/679, it helps reduce the number of queries\\r\\n',\n", - " 'diff_hunk': '@@ -361,7 +361,15 @@ def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwarg\\n \\n def __iter__(self):\\n if self.rng_types is not None:\\n- synchronize_rng_states(self.rng_types, self.generator)\\n+ batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler',\n", - " 'from_author': True},\n", - " {'body': 'https://huggingface.co/docs/datasets/v2.4.0/en/use_with_pytorch#use-a-batchsampler',\n", - " 'diff_hunk': '@@ -361,7 +361,15 @@ def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwarg\\n \\n def __iter__(self):\\n if self.rng_types is not None:\\n- synchronize_rng_states(self.rng_types, self.generator)\\n+ batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler',\n", - " 'from_author': True},\n", - " {'body': 'Wow! 
Thanks a lot!\\r\\nI spent hours finding a way to query my dataset with a list of indices instead of collating data outside the dataset(so that I can write batching logic in the dataset/reduce the number of queries), and failed(thus I was forced to use DataLoader as a sampler(sharded by accelerate), as mentioned in #624 ).\\r\\nNow I know I can just use batch sampler as sampler without passing batch size to `DataLoader`.\\r\\nThanks!!!😁👍',\n", - " 'diff_hunk': '@@ -361,7 +361,15 @@ def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwarg\\n \\n def __iter__(self):\\n if self.rng_types is not None:\\n- synchronize_rng_states(self.rng_types, self.generator)\\n+ batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler',\n", - " 'from_author': False},\n", - " {'body': 'We should probably leave this one out and pass it as the generator (as initially suggested in your PR).',\n", - " 'diff_hunk': '@@ -650,12 +650,12 @@ def prepare_data_loader(\\n # Iterable dataset doesn\\'t like batch_sampler, but data_loader creates a default one for it\\n new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None\\n sampler_is_batch_sampler = False\\n- generator = getattr(dataloader, \"generator\", None)\\n+ synchronized_generator = getattr(dataloader, \"generator\", None)',\n", - " 'from_author': False},\n", - " {'body': 'Here we should pass the main generator of the original dataloader if there was one (see comment above).',\n", - " 'diff_hunk': '@@ -722,9 +721,9 @@ def prepare_data_loader(\\n new_dataset,\\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\\n sampler=new_batch_sampler,\\n- batch_size=getattr(dataloader, \"batch_size\", _PYTORCH_DATALOADER_KWARGS[\"batch_size\"]),\\n+ batch_size=dataloader.batch_size,\\n rng_types=rng_types,\\n- generator=generator,',\n", - " 'from_author': False},\n", - " {'body': 'Same there.',\n", - " 'diff_hunk': '@@ -733,7 +732,7 @@ def prepare_data_loader(\\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\\n batch_sampler=new_batch_sampler,\\n rng_types=rng_types,\\n- generator=generator,',\n", - " 'from_author': False},\n", - " {'body': 'I remove `\"generator\"` from `ignore_kwargs`. It should be enough.',\n", - " 'diff_hunk': '@@ -722,9 +721,9 @@ def prepare_data_loader(\\n new_dataset,\\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\\n sampler=new_batch_sampler,\\n- batch_size=getattr(dataloader, \"batch_size\", _PYTORCH_DATALOADER_KWARGS[\"batch_size\"]),\\n+ batch_size=dataloader.batch_size,\\n rng_types=rng_types,\\n- generator=generator,',\n", - " 'from_author': False},\n", - " {'body': '\"generator\" is removed from `ignore_kwargs`.',\n", - " 'diff_hunk': '@@ -650,12 +650,12 @@ def prepare_data_loader(\\n # Iterable dataset doesn\\'t like batch_sampler, but data_loader creates a default one for it\\n new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None\\n sampler_is_batch_sampler = False\\n- generator = getattr(dataloader, \"generator\", None)\\n+ synchronized_generator = getattr(dataloader, \"generator\", None)',\n", - " 'from_author': False},\n", - " {'body': 'Yes, but this one should not be synchronized I think. 
Or do we want to default to the dataloader generator if tehre is no generator in the sampler?',\n", - " 'diff_hunk': '@@ -650,12 +650,12 @@ def prepare_data_loader(\\n # Iterable dataset doesn\\'t like batch_sampler, but data_loader creates a default one for it\\n new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None\\n sampler_is_batch_sampler = False\\n- generator = getattr(dataloader, \"generator\", None)\\n+ synchronized_generator = getattr(dataloader, \"generator\", None)',\n", - " 'from_author': False},\n", - " {'body': 'Indeed, resolving comment.',\n", - " 'diff_hunk': '@@ -722,9 +721,9 @@ def prepare_data_loader(\\n new_dataset,\\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\\n sampler=new_batch_sampler,\\n- batch_size=getattr(dataloader, \"batch_size\", _PYTORCH_DATALOADER_KWARGS[\"batch_size\"]),\\n+ batch_size=dataloader.batch_size,\\n rng_types=rng_types,\\n- generator=generator,',\n", - " 'from_author': False},\n", - " {'body': \"Sorry I didn't get your point. Yes, this line should be `synchronized_generator = None`, as #790 .\",\n", - " 'diff_hunk': '@@ -650,12 +650,12 @@ def prepare_data_loader(\\n # Iterable dataset doesn\\'t like batch_sampler, but data_loader creates a default one for it\\n new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None\\n sampler_is_batch_sampler = False\\n- generator = getattr(dataloader, \"generator\", None)\\n+ synchronized_generator = getattr(dataloader, \"generator\", None)',\n", - " 'from_author': False},\n", - " {'body': 'Oops! Yup, resolved the comment',\n", - " 'diff_hunk': '@@ -650,12 +650,12 @@ def prepare_data_loader(\\n # Iterable dataset doesn\\'t like batch_sampler, but data_loader creates a default one for it\\n new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None\\n sampler_is_batch_sampler = False\\n- generator = getattr(dataloader, \"generator\", None)\\n+ synchronized_generator = getattr(dataloader, \"generator\", None)',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'This PR currently will break \\r\\nhttps://github.com/huggingface/accelerate/blob/37b2aa0173fe9b6f46e7df07b92bb55e54b77eb3/src/accelerate/data_loader.py#L653\\r\\nhttps://github.com/huggingface/accelerate/blob/37b2aa0173fe9b6f46e7df07b92bb55e54b77eb3/src/accelerate/data_loader.py#L658\\r\\nhttps://github.com/huggingface/accelerate/blob/37b2aa0173fe9b6f46e7df07b92bb55e54b77eb3/src/accelerate/data_loader.py#L677\\r\\n(Of course https://github.com/huggingface/accelerate/blob/37b2aa0173fe9b6f46e7df07b92bb55e54b77eb3/src/accelerate/data_loader.py#L698 should be modified)\\r\\n\\r\\n---\\r\\n\\r\\nThis code duplication (for finding the sampler) also impairs readability instead of improving readability.\\r\\n```python\\r\\nbatch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler\\r\\nsampler = (\\r\\n batch_sampler.batch_sampler.sampler\\r\\n if hasattr(batch_sampler, \"batch_sampler\")\\r\\n else batch_sampler.sampler\\r\\n)\\r\\nif hasattr(sampler, \"generator\"):\\r\\n generator = sampler.generator\\r\\n synchronize_rng_states(self.rng_types, generator)\\r\\n```\\r\\nI believe what needs to be synchronized is IterableDataset\\'s generator(according to `IterableDatasetShard` implementation) OR 
sampler\\'s generator. We can store the synchronized generator in a private attribute.\\r\\n',\n", - " 'from_author': False},\n", - " {'body': '@YouJiacheng I am not seeing any breaks in all the lines you mention, which are completely orthogonal to the change suggested. As for the code duplication, let us worry about readability as maintainers :-) We cannot store the generator as a private attribute as it is fetched in two different functions.',\n", - " 'from_author': False},\n", - " {'body': 'By \"break\", I means that these lines will become useless. And for `generator = dataloader.dataset.generator`, it is intended to synchronize the generator of IterableDataset. So after proposed change, IterableDataset\\'s generator won\\'t be synchronized.',\n", - " 'from_author': False},\n", - " {'body': \"Ah!, I get what you mean, thanks for clarifying!\\r\\n\\r\\nWe should indeed make the difference between the `sampler_generator` (for batch samplers) or `dataset.generator` (in iterable dataset) and the Dataloader generator. The latter should be passed along again when creating the final DataLoader (or use the default if it's none) while the formers should be the one synchronized.\\r\\n\\r\\nThe check on `rng_types` should probably be moved to the init of `BatchSamplerShard`/`IterableDatasetShard`.\",\n", - " 'from_author': False},\n", - " {'body': 'It\\'s hard to explain my proposal using text. So I open a parallel PR to show the code.(It is somewhat \"pseudocode\"/proof of concept, since I didn\\'t test it).',\n", - " 'from_author': False},\n", - " {'body': \"@YouJiacheng Thanks for taking the time to draft a PR to show your points, it's much clearer this way! I think we need to merge the two PRs somehow as they both contain important things the other has not.\",\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/789'},\n", - " 1094266566: {'diff': 'diff --git a/src/accelerate/launchers.py b/src/accelerate/launchers.py\\nindex 0387109b6..ea8fffa51 100644\\n--- a/src/accelerate/launchers.py\\n+++ b/src/accelerate/launchers.py\\n@@ -43,12 +43,12 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mix\\n The port to use to communicate between processes when launching a multi-GPU training.\\n \"\"\"\\n # Are we in a google colab or a Kaggle Kernel?\\n+ in_colab = False\\n+ in_kaggle = False\\n if any(key.startswith(\"KAGGLE\") for key in os.environ.keys()):\\n- in_colab_or_kaggle = True\\n+ in_kaggle = True\\n elif \"IPython\" in sys.modules:\\n- in_colab_or_kaggle = \"google.colab\" in str(sys.modules[\"IPython\"].get_ipython())\\n- else:\\n- in_colab_or_kaggle = False\\n+ in_colab = \"google.colab\" in str(sys.modules[\"IPython\"].get_ipython())\\n \\n try:\\n mixed_precision = PrecisionType(mixed_precision.lower())\\n@@ -57,31 +57,29 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mix\\n f\"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.\"\\n )\\n \\n- if in_colab_or_kaggle:\\n- if os.environ.get(\"TPU_NAME\", None) is not None:\\n- # TPU launch\\n- import torch_xla.distributed.xla_multiprocessing as xmp\\n-\\n- if len(AcceleratorState._shared_state) > 0:\\n- raise ValueError(\\n- \"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside \"\\n- \"your training function. 
Restart your notebook and make sure no cells initializes an \"\\n- \"`Accelerator`.\"\\n- )\\n- if num_processes is None:\\n- num_processes = 8\\n+ if (in_colab or in_kaggle) and (os.environ.get(\"TPU_NAME\", None) is not None):\\n+ # TPU launch\\n+ import torch_xla.distributed.xla_multiprocessing as xmp\\n \\n- launcher = PrepareForLaunch(function, distributed_type=\"TPU\")\\n- print(f\"Launching a training on {num_processes} TPU cores.\")\\n- xmp.spawn(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\\n+ if len(AcceleratorState._shared_state) > 0:\\n+ raise ValueError(\\n+ \"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside \"\\n+ \"your training function. Restart your notebook and make sure no cells initializes an \"\\n+ \"`Accelerator`.\"\\n+ )\\n+ if num_processes is None:\\n+ num_processes = 8\\n+\\n+ launcher = PrepareForLaunch(function, distributed_type=\"TPU\")\\n+ print(f\"Launching a training on {num_processes} TPU cores.\")\\n+ xmp.spawn(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\\n+ elif in_colab:\\n+ # No need for a distributed launch otherwise as it\\'s either CPU or one GPU.\\n+ if torch.cuda.is_available():\\n+ print(\"Launching training on one GPU.\")\\n else:\\n- # No need for a distributed launch otherwise as it\\'s either CPU or one GPU.\\n- if torch.cuda.is_available():\\n- print(\"Launching training on one GPU.\")\\n- else:\\n- print(\"Launching training on one CPU.\")\\n- function(*args)\\n-\\n+ print(\"Launching training on one CPU.\")\\n+ function(*args)\\n else:\\n if num_processes is None:\\n raise ValueError(\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/783'},\n", - " 1094235843: {'diff': 'diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json\\nnew file mode 100644\\nindex 000000000..9d44afde7\\n--- /dev/null\\n+++ b/.devcontainer/devcontainer.json\\n@@ -0,0 +1,25 @@\\n+// File only needed for VSCode users to have proper Docker based interpreters\\n+{\\n+ \"name\": \"accelerate_dev_environment\",\\n+ \"build\": {\\n+ // ACTION NEEDED: comment/uncomment the relevant line depending on whether you are in a CPU/GPU environment\\n+ \"dockerfile\": \"../docker/accelerate-cpu/Dockerfile\"\\n+// \"dockerfile\": \"../docker/accelerate-gpu/Dockerfile\"\\n+ },\\n+ \"runArgs\": [\\n+ // ACTION NEEDED: uncomment the next line if your local machine has GPUs available\\n+// \"--gpus\", \"all\",\\n+ // Enable the docker container to access system resources\\n+ \"--ipc\", \"host\"\\n+ ],\\n+ \"remoteEnv\": {\\n+ \"PYTHONPATH\": \"${containerEnv:PATH}:${containerWorkspaceFolder}\"\\n+ },\\n+ \"extensions\": [\\n+ // Ensure we have IntelliSense in VSCode when running inside container\\n+ \"ms-python.python\"\\n+ ],\\n+ \"workspaceFolder\": \"/workspaces/accelerate\",\\n+ // Need git for VSCode to color code modifications. 
Only runs when building environment.\\n+ \"onCreateCommand\": \"apt-get update && apt-get install -y git && pip install -e \\'.[dev]\\'\"\\n+}\\n\\\\ No newline at end of file\\ndiff --git a/CONTRIBUTING.md b/CONTRIBUTING.md\\nindex fcc7d9b1c..d0e142b16 100644\\n--- a/CONTRIBUTING.md\\n+++ b/CONTRIBUTING.md\\n@@ -130,6 +130,9 @@ Follow these steps to start contributing:\\n it with `pip uninstall accelerate` before reinstalling it in editable\\n mode with the `-e` flag.)\\n \\n+ Alternatively, if you are using [Visual Studio Code](https://code.visualstudio.com/Download), the fastest way to get set up is by using\\n+ the provided Dev Container. Documentation on how to get started with dev containers is available [here](https://code.visualstudio.com/docs/remote/containers).\\n+\\n 5. Develop the features on your branch.\\n \\n As you work on the features, you should make sure that the test suite\\n',\n", - " 'code_comments': [{'body': 'I think we should enable CPU by default and have the user work on the container to activate GPU.',\n", - " 'diff_hunk': '@@ -0,0 +1,25 @@\\n+// File only needed for VSCode users to have proper Docker based interpreters\\n+{\\n+ \"name\": \"accelerate_dev_environment\",\\n+ \"build\": {\\n+ // ACTION NEEDED: comment/uncomment the relevant line depending on whether you are in a CPU/GPU environment\\n+ // \"dockerfile\": \"../docker/accelerate-cpu/Dockerfile\"',\n", - " 'from_author': False},\n", - " {'body': 'Same here.',\n", - " 'diff_hunk': '@@ -0,0 +1,25 @@\\n+// File only needed for VSCode users to have proper Docker based interpreters\\n+{\\n+ \"name\": \"accelerate_dev_environment\",\\n+ \"build\": {\\n+ // ACTION NEEDED: comment/uncomment the relevant line depending on whether you are in a CPU/GPU environment\\n+ // \"dockerfile\": \"../docker/accelerate-cpu/Dockerfile\"\\n+ \"dockerfile\": \"../docker/accelerate-gpu/Dockerfile\"\\n+ },\\n+ \"runArgs\": [\\n+ // ACTION NEEDED: comment the next line if your local machine has no GPUs available',\n", - " 'from_author': False},\n", - " {'body': 'Sure, I was torn on this, so am happy to go the other way.',\n", - " 'diff_hunk': '@@ -0,0 +1,25 @@\\n+// File only needed for VSCode users to have proper Docker based interpreters\\n+{\\n+ \"name\": \"accelerate_dev_environment\",\\n+ \"build\": {\\n+ // ACTION NEEDED: comment/uncomment the relevant line depending on whether you are in a CPU/GPU environment\\n+ // \"dockerfile\": \"../docker/accelerate-cpu/Dockerfile\"',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': \"> Also, if the users change the devcontainer.json for their local use, won't those changes be pushed to GitHub automatically?\\r\\n\\r\\nUnfortunately, VSCode doesn't (yet) support multiple dev container configurations to my knowledge, so we have to rely on people not committing dev container changes.\",\n", - " 'from_author': True}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/782'},\n", - " 1094096724: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex bf17f57ce..81759962c 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -153,6 +153,10 @@ class Accelerator:\\n If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process\\n and then the batches are split and broadcast to each process. 
Will default to `True` for `DataLoader` whose\\n underlying dataset is an `IterableDataset`, `False` otherwise.\\n+ even_batches (`bool`, *optional*, defaults to `True`):\\n+ If set to `True`, in cases where the total batch size across all processes does not exactly divide the\\n+ dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among\\n+ all workers.\\n step_scheduler_with_optimizer (`bool`, *optional`, defaults to `True`):\\n Set `True` if the learning rate scheduler is stepped at the same time as the optimizer, `False` if only\\n done under certain circumstances (at the end of each epoch, for instance).\\n@@ -191,6 +195,7 @@ def __init__(\\n log_with: Optional[List[Union[str, LoggerType, GeneralTracker]]] = None,\\n logging_dir: Optional[Union[str, os.PathLike]] = None,\\n dispatch_batches: Optional[bool] = None,\\n+ even_batches: bool = True,\\n step_scheduler_with_optimizer: bool = True,\\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\\n ):\\n@@ -305,6 +310,7 @@ def __init__(\\n raise ImportError(\\n \"Using `DataLoaderDispatcher` requires PyTorch 1.8.0 minimum. You have {torch.__version__}.\"\\n )\\n+ self.even_batches = even_batches\\n self.step_scheduler_with_optimizer = step_scheduler_with_optimizer\\n \\n # Mixed precision attributes\\n@@ -1109,6 +1115,7 @@ def prepare_data_loader(self, data_loader: torch.utils.data.DataLoader, device_p\\n put_on_device=device_placement,\\n rng_types=self.rng_types.copy(),\\n dispatch_batches=self.dispatch_batches,\\n+ even_batches=self.even_batches,\\n )\\n \\n def prepare_optimizer(self, optimizer: torch.optim.Optimizer, device_placement=None):\\ndiff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\\nindex cdae54af9..39f843430 100644\\n--- a/src/accelerate/data_loader.py\\n+++ b/src/accelerate/data_loader.py\\n@@ -118,7 +118,8 @@ class BatchSamplerShard(BatchSampler):\\n \\n \\n \\n- This does not support `BatchSampler` with varying batch size yet.\\n+ `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches`\\n+ equal to `False`\\n \\n \"\"\"\\n \\n@@ -570,6 +571,7 @@ def prepare_data_loader(\\n put_on_device: bool = False,\\n rng_types: Optional[List[Union[str, RNGType]]] = None,\\n dispatch_batches: Optional[bool] = None,\\n+ even_batches: bool = True,\\n ) -> DataLoader:\\n \"\"\"\\n Wraps a PyTorch `DataLoader` to generate batches for one of the processes only.\\n@@ -615,15 +617,21 @@ def prepare_data_loader(\\n If set to `True`, the datalaoder prepared is only iterated through on the main process and then the batches\\n are split and broadcast to each process. Will default to `True` when the underlying dataset is an\\n `IterableDataset`, `False` otherwise.\\n+ even_batches (`bool`, *optional*, defaults to `True`):\\n+ If set to `True`, in cases where the total batch size across all processes does not exactly divide the\\n+ dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among\\n+ all workers.\\n \\n Returns:\\n `torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches\\n \\n \\n \\n- This does not support `BatchSampler` with varying batch size yet.\\n+ `BatchSampler`s with varying batch sizes are not enabled by default. 
To enable this behaviour, set `even_batches`\\n+ equal to `False`\\n \\n- \"\"\"\\n+ \\n+ \"\"\"\\n if dispatch_batches is None:\\n if is_torch_version(\"<\", \"1.8.0\") or not put_on_device:\\n dispatch_batches = False\\n@@ -683,6 +691,7 @@ def prepare_data_loader(\\n num_processes=num_processes,\\n process_index=process_index,\\n split_batches=split_batches,\\n+ even_batches=even_batches,\\n )\\n \\n # We ignore all of those since they are all dealt with by our new_batch_sampler\\ndiff --git a/src/accelerate/test_utils/scripts/test_distributed_data_loop.py b/src/accelerate/test_utils/scripts/test_distributed_data_loop.py\\nnew file mode 100644\\nindex 000000000..eaf7c6a34\\n--- /dev/null\\n+++ b/src/accelerate/test_utils/scripts/test_distributed_data_loop.py\\n@@ -0,0 +1,113 @@\\n+#!/usr/bin/env python\\n+\\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+from typing import List\\n+\\n+import torch\\n+from torch.utils.data import DataLoader, TensorDataset\\n+\\n+from accelerate.accelerator import Accelerator\\n+\\n+\\n+def create_accelerator(even_batches=True):\\n+ accelerator = Accelerator(even_batches=even_batches)\\n+ assert accelerator.num_processes == 2, \"this script expects that two GPUs are available\"\\n+ return accelerator\\n+\\n+\\n+def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int):\\n+ \"\"\"\\n+ Create a simple DataLoader to use during the test cases\\n+ \"\"\"\\n+ dataset = TensorDataset(torch.as_tensor(range(dataset_size)))\\n+\\n+ dl = DataLoader(dataset, batch_size=batch_size)\\n+ dl = accelerator.prepare(dl)\\n+\\n+ return dl\\n+\\n+\\n+def verify_dataloader_batch_sizes(\\n+ accelerator: Accelerator,\\n+ dataset_size: int,\\n+ batch_size: int,\\n+ process_0_expected_batch_sizes: List[int],\\n+ process_1_expected_batch_sizes: List[int],\\n+):\\n+ \"\"\"\\n+ A helper function for verifying the batch sizes coming from a prepared dataloader in each process\\n+ \"\"\"\\n+ dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)\\n+\\n+ batch_sizes = [len(batch[0]) for batch in dl]\\n+\\n+ if accelerator.process_index == 0:\\n+ assert batch_sizes == process_0_expected_batch_sizes\\n+ elif accelerator.process_index == 1:\\n+ assert batch_sizes == process_1_expected_batch_sizes\\n+\\n+\\n+def test_default_ensures_even_batch_sizes():\\n+\\n+ accelerator = create_accelerator()\\n+\\n+ # without padding, we would expect a different number of batches\\n+ verify_dataloader_batch_sizes(\\n+ accelerator,\\n+ dataset_size=3,\\n+ batch_size=1,\\n+ process_0_expected_batch_sizes=[1, 1],\\n+ process_1_expected_batch_sizes=[1, 1],\\n+ )\\n+\\n+ # without padding, we would expect the same number of batches, but different sizes\\n+ verify_dataloader_batch_sizes(\\n+ accelerator,\\n+ dataset_size=7,\\n+ batch_size=2,\\n+ process_0_expected_batch_sizes=[2, 2],\\n+ process_1_expected_batch_sizes=[2, 2],\\n+ )\\n+\\n+\\n+def 
test_can_disable_even_batches():\\n+ accelerator = create_accelerator(even_batches=False)\\n+\\n+ verify_dataloader_batch_sizes(\\n+ accelerator,\\n+ dataset_size=3,\\n+ batch_size=1,\\n+ process_0_expected_batch_sizes=[1, 1],\\n+ process_1_expected_batch_sizes=[1],\\n+ )\\n+\\n+ verify_dataloader_batch_sizes(\\n+ accelerator,\\n+ dataset_size=7,\\n+ batch_size=2,\\n+ process_0_expected_batch_sizes=[2, 2],\\n+ process_1_expected_batch_sizes=[2, 1],\\n+ )\\n+\\n+\\n+if __name__ == \"__main__\":\\n+ accelerator = create_accelerator()\\n+\\n+ accelerator.print(\"Test that even_batches variable ensures uniform batches across processes\")\\n+ test_default_ensures_even_batch_sizes()\\n+\\n+ accelerator.print(\"Run tests with even_batches disabled\")\\n+ test_can_disable_even_batches()\\ndiff --git a/tests/test_multigpu.py b/tests/test_multigpu.py\\nindex 2c0403ed5..54a5bca75 100644\\n--- a/tests/test_multigpu.py\\n+++ b/tests/test_multigpu.py\\n@@ -28,6 +28,9 @@ class MultiGPUTester(unittest.TestCase):\\n def setUp(self):\\n mod_file = inspect.getfile(accelerate.test_utils)\\n self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"scripts\", \"test_script.py\"])\\n+ self.data_loop_file_path = os.path.sep.join(\\n+ mod_file.split(os.path.sep)[:-1] + [\"scripts\", \"test_distributed_data_loop.py\"]\\n+ )\\n \\n @require_multi_gpu\\n def test_multi_gpu(self):\\n@@ -42,6 +45,17 @@ def test_pad_across_processes(self):\\n with patch_environment(omp_num_threads=1):\\n execute_subprocess_async(cmd, env=os.environ.copy())\\n \\n+ @require_multi_gpu\\n+ def test_distributed_data_loop(self):\\n+ \"\"\"\\n+ This TestCase checks the behaviour that occurs during distributed training or evaluation,\\n+ when the batch size does not evenly divide the dataset size.\\n+ \"\"\"\\n+ print(f\"Found {torch.cuda.device_count()} devices, using 2 devices only\")\\n+ cmd = get_launch_prefix() + [f\"--nproc_per_node={torch.cuda.device_count()}\", self.data_loop_file_path]\\n+ with patch_environment(omp_num_threads=1, cuda_visible_devices=\"0,1\"):\\n+ execute_subprocess_async(cmd, env=os.environ.copy())\\n+\\n \\n if __name__ == \"__main__\":\\n accelerator = Accelerator()\\n',\n", - " 'code_comments': [{'body': '```suggestion\\r\\n even_batches (`bool`, *optional*, defaults to `True`):\\r\\n```',\n", - " 'diff_hunk': \"@@ -153,6 +153,10 @@ class Accelerator:\\n If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process\\n and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose\\n underlying dataset is an `IterableDataset`, `False` otherwise.\\n+ even_batches ('bool', defaults to `True`):\",\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\n even_batches (`bool`, *optional*, defaults to `True`):\\r\\n```',\n", - " 'diff_hunk': \"@@ -615,15 +616,15 @@ def prepare_data_loader(\\n If set to `True`, the datalaoder prepared is only iterated through on the main process and then the batches\\n are split and broadcast to each process. 
Will default to `True` when the underlying dataset is an\\n `IterableDataset`, `False` otherwise.\\n+ even_batches ('bool', defaults to `True`):\",\n", - " 'from_author': False},\n", - " {'body': 'You should leave the warning but rephrase to state a `BatchSampler` with varying batch size will require `even_batches=False`.',\n", - " 'diff_hunk': \"@@ -615,15 +616,15 @@ def prepare_data_loader(\\n If set to `True`, the datalaoder prepared is only iterated through on the main process and then the batches\\n are split and broadcast to each process. Will default to `True` when the underlying dataset is an\\n `IterableDataset`, `False` otherwise.\\n+ even_batches ('bool', defaults to `True`):\\n+ If set to `True`, in cases where the total batch size across all processes does not exactly divide the\\n+ dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among\\n+ all workers.\\n \\n Returns:\\n `torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches\\n \\n- \\n-\\n- This does not support `BatchSampler` with varying batch size yet.\",\n", - " 'from_author': False},\n", - " {'body': 'Sure, I can update that',\n", - " 'diff_hunk': \"@@ -615,15 +616,15 @@ def prepare_data_loader(\\n If set to `True`, the datalaoder prepared is only iterated through on the main process and then the batches\\n are split and broadcast to each process. Will default to `True` when the underlying dataset is an\\n `IterableDataset`, `False` otherwise.\\n+ even_batches ('bool', defaults to `True`):\\n+ If set to `True`, in cases where the total batch size across all processes does not exactly divide the\\n+ dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among\\n+ all workers.\\n \\n Returns:\\n `torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches\\n \\n- \\n-\\n- This does not support `BatchSampler` with varying batch size yet.\",\n", - " 'from_author': True},\n", - " {'body': \"You're currently not testing anything at all, it's never being ran. Please see here to learn how: https://github.com/huggingface/accelerate/blob/main/src/accelerate/test_utils/scripts/test_sync.py#L229-L274\\r\\n\\r\\nI also highly recommend using print statements so we can see just what is being tested at a given moment, and we know if they've run 😄 \",\n", - " 'diff_hunk': '@@ -0,0 +1,103 @@\\n+#!/usr/bin/env python\\n+\\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+from typing import List\\n+\\n+import torch\\n+from torch.utils.data import DataLoader, TensorDataset\\n+\\n+from accelerate.accelerator import Accelerator\\n+\\n+\\n+def create_accelerator(even_batches=True):\\n+ accelerator = Accelerator(even_batches=even_batches)\\n+ assert accelerator.num_processes == 2, \"this script expects that two GPUs are available\"\\n+ return accelerator\\n+\\n+\\n+def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int):\\n+ \"\"\"\\n+ Create a simple DataLoader to use during the test cases\\n+ \"\"\"\\n+ dataset = TensorDataset(torch.as_tensor(range(dataset_size)))\\n+\\n+ dl = DataLoader(dataset, batch_size=batch_size)\\n+ dl = accelerator.prepare(dl)\\n+\\n+ return dl\\n+\\n+\\n+def verify_dataloader_batch_sizes(\\n+ accelerator: Accelerator,\\n+ dataset_size: int,\\n+ batch_size: int,\\n+ process_0_expected_batch_sizes: List[int],\\n+ process_1_expected_batch_sizes: List[int],\\n+):\\n+ \"\"\"\\n+ A helper function for verifying the batch sizes coming from a prepared dataloader in each process\\n+ \"\"\"\\n+ dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)\\n+\\n+ batch_sizes = [len(batch[0]) for batch in dl]\\n+\\n+ if accelerator.process_index == 0:\\n+ assert batch_sizes == process_0_expected_batch_sizes\\n+ elif accelerator.process_index == 1:\\n+ assert batch_sizes == process_1_expected_batch_sizes\\n+\\n+\\n+def test_default_ensures_even_batch_sizes():\\n+\\n+ accelerator = create_accelerator()\\n+\\n+ # without padding, we would expect a different number of batches\\n+ verify_dataloader_batch_sizes(\\n+ accelerator,\\n+ dataset_size=3,\\n+ batch_size=1,\\n+ process_0_expected_batch_sizes=[1, 1],\\n+ process_1_expected_batch_sizes=[1, 1],\\n+ )\\n+\\n+ # without padding, we would expect the same number of batches, but different sizes\\n+ verify_dataloader_batch_sizes(\\n+ accelerator,\\n+ dataset_size=7,\\n+ batch_size=2,\\n+ process_0_expected_batch_sizes=[2, 2],\\n+ process_1_expected_batch_sizes=[2, 2],\\n+ )\\n+\\n+\\n+def test_can_disable_even_batches():',\n", - " 'from_author': False},\n", - " {'body': 'Gah! I think I misinterpreted what you meant when you said PyTest format. Looking at the other example, it looks like it still needs a main method to drive running the tests; just what I removed a few commits back :satisfied:! I\\'ll reinsert the main method and some print statements.\\r\\n\\r\\nThat said, when I run it with PyTest locally, using `CUDA_VISIBLE_DEVICES=\"0,1\" pytest -sv tests/test_multigpu.py` it does seem to pick them up.',\n", - " 'diff_hunk': '@@ -0,0 +1,103 @@\\n+#!/usr/bin/env python\\n+\\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+from typing import List\\n+\\n+import torch\\n+from torch.utils.data import DataLoader, TensorDataset\\n+\\n+from accelerate.accelerator import Accelerator\\n+\\n+\\n+def create_accelerator(even_batches=True):\\n+ accelerator = Accelerator(even_batches=even_batches)\\n+ assert accelerator.num_processes == 2, \"this script expects that two GPUs are available\"\\n+ return accelerator\\n+\\n+\\n+def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int):\\n+ \"\"\"\\n+ Create a simple DataLoader to use during the test cases\\n+ \"\"\"\\n+ dataset = TensorDataset(torch.as_tensor(range(dataset_size)))\\n+\\n+ dl = DataLoader(dataset, batch_size=batch_size)\\n+ dl = accelerator.prepare(dl)\\n+\\n+ return dl\\n+\\n+\\n+def verify_dataloader_batch_sizes(\\n+ accelerator: Accelerator,\\n+ dataset_size: int,\\n+ batch_size: int,\\n+ process_0_expected_batch_sizes: List[int],\\n+ process_1_expected_batch_sizes: List[int],\\n+):\\n+ \"\"\"\\n+ A helper function for verifying the batch sizes coming from a prepared dataloader in each process\\n+ \"\"\"\\n+ dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)\\n+\\n+ batch_sizes = [len(batch[0]) for batch in dl]\\n+\\n+ if accelerator.process_index == 0:\\n+ assert batch_sizes == process_0_expected_batch_sizes\\n+ elif accelerator.process_index == 1:\\n+ assert batch_sizes == process_1_expected_batch_sizes\\n+\\n+\\n+def test_default_ensures_even_batch_sizes():\\n+\\n+ accelerator = create_accelerator()\\n+\\n+ # without padding, we would expect a different number of batches\\n+ verify_dataloader_batch_sizes(\\n+ accelerator,\\n+ dataset_size=3,\\n+ batch_size=1,\\n+ process_0_expected_batch_sizes=[1, 1],\\n+ process_1_expected_batch_sizes=[1, 1],\\n+ )\\n+\\n+ # without padding, we would expect the same number of batches, but different sizes\\n+ verify_dataloader_batch_sizes(\\n+ accelerator,\\n+ dataset_size=7,\\n+ batch_size=2,\\n+ process_0_expected_batch_sizes=[2, 2],\\n+ process_1_expected_batch_sizes=[2, 2],\\n+ )\\n+\\n+\\n+def test_can_disable_even_batches():',\n", - " 'from_author': True},\n", - " {'body': \"I've just pushed some changes that hopefully resolve this. Just FYI, it is getting pretty late here in the UK, and I am heading off on vacation tomorrow until the 31st. If there are other changes required, and you would like this merged before I'm back, please feel free to update the branch with any further changes!\",\n", - " 'diff_hunk': '@@ -0,0 +1,103 @@\\n+#!/usr/bin/env python\\n+\\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+from typing import List\\n+\\n+import torch\\n+from torch.utils.data import DataLoader, TensorDataset\\n+\\n+from accelerate.accelerator import Accelerator\\n+\\n+\\n+def create_accelerator(even_batches=True):\\n+ accelerator = Accelerator(even_batches=even_batches)\\n+ assert accelerator.num_processes == 2, \"this script expects that two GPUs are available\"\\n+ return accelerator\\n+\\n+\\n+def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int):\\n+ \"\"\"\\n+ Create a simple DataLoader to use during the test cases\\n+ \"\"\"\\n+ dataset = TensorDataset(torch.as_tensor(range(dataset_size)))\\n+\\n+ dl = DataLoader(dataset, batch_size=batch_size)\\n+ dl = accelerator.prepare(dl)\\n+\\n+ return dl\\n+\\n+\\n+def verify_dataloader_batch_sizes(\\n+ accelerator: Accelerator,\\n+ dataset_size: int,\\n+ batch_size: int,\\n+ process_0_expected_batch_sizes: List[int],\\n+ process_1_expected_batch_sizes: List[int],\\n+):\\n+ \"\"\"\\n+ A helper function for verifying the batch sizes coming from a prepared dataloader in each process\\n+ \"\"\"\\n+ dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)\\n+\\n+ batch_sizes = [len(batch[0]) for batch in dl]\\n+\\n+ if accelerator.process_index == 0:\\n+ assert batch_sizes == process_0_expected_batch_sizes\\n+ elif accelerator.process_index == 1:\\n+ assert batch_sizes == process_1_expected_batch_sizes\\n+\\n+\\n+def test_default_ensures_even_batch_sizes():\\n+\\n+ accelerator = create_accelerator()\\n+\\n+ # without padding, we would expect a different number of batches\\n+ verify_dataloader_batch_sizes(\\n+ accelerator,\\n+ dataset_size=3,\\n+ batch_size=1,\\n+ process_0_expected_batch_sizes=[1, 1],\\n+ process_1_expected_batch_sizes=[1, 1],\\n+ )\\n+\\n+ # without padding, we would expect the same number of batches, but different sizes\\n+ verify_dataloader_batch_sizes(\\n+ accelerator,\\n+ dataset_size=7,\\n+ batch_size=2,\\n+ process_0_expected_batch_sizes=[2, 2],\\n+ process_1_expected_batch_sizes=[2, 2],\\n+ )\\n+\\n+\\n+def test_can_disable_even_batches():',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': \"Hi, I have made a start on the work discussed in https://github.com/huggingface/accelerate/issues/684, and added an argument to the accelerator. \\r\\n\\r\\nI am slightly unsure about your testing strategy though, and which tests you require for this; I couldn't seem to find any integration tests for accelerator arguments. So far, I have tried to follow similar ideas to those seen in the end-to-end multi-GPU tests. 
\\r\\n\\r\\nI am slightly confused by exactly how the Multi-GPU tests are triggered though; whilst I see that there is a decorator to skip certain tests when multiple GPUs are not available, when I try to run this locally (on a machine with 2 GPUs), those tests only seem to execute on a single process.\\r\\n\\r\\nAny guidance here would be appreciated! cc: @muellerzr @sgugger \",\n", - " 'from_author': True},\n", - " {'body': 'Hey @Chris-hughes10! Thanks!\\r\\n\\r\\nYou\\'re absolutely right, I should probably write this all up in a contribution doc and will add it to my list of tasks!\\r\\n\\r\\nAccelerates tests are setup in a few ways, but the key idea to remember is:\\r\\n\\r\\nIf it\\'s something to be tested across multiple devices/workers, such as CPU, TPU, and mutli-GPU (such as your tests you want to add), these all get thrown into a script located in `accelerate/test_utils/scripts`\\r\\n\\r\\nFor example, the major integration script that checks the dataloaders operate properly is located at `accelerate/test_utils/scripts/test_script.py` (I wonder if we should section this out a bit more @sgugger since we now have a whole folder dedicated to it and we can clean it up some), which you can find here: https://github.com/huggingface/accelerate/blob/main/src/accelerate/test_utils/scripts/test_script.py\\r\\n\\r\\nThese tests are setup like how a `pytest` test would be, where you have `def some_test` and `assert` statements to check if things are working properly. \\r\\n\\r\\nFrom here your script should be added as a test case to `tests/test.py`, `tests/test_cpu.py` (if applicable for multi-cpu), `tests/test_multigpu.py`, and `tests/test_tpu.py`.\\r\\n\\r\\nIt has to be done this way because each platform has different launching configurations :) \\r\\n\\r\\nFinally, when running pytest you need to set the cuda visible devices for it to be able to pick up on the multiple GPUs, such as:\\r\\n\\r\\n```bash\\r\\nCUDA_VISIBLE_DEVICES=\"0,1\" pytest -sv tests/test_multigpu.py\\r\\n```\\r\\nOr:\\r\\n```bash\\r\\nCUDA_VISIBLE_DEVICES=\"\" pytest -sv tests/test_multigpu.py\\r\\n```\\r\\n(the latter will have all their tests skipped). \\r\\n\\r\\nAll skipping decorator conditionals can be found here: https://github.com/huggingface/accelerate/blob/main/src/accelerate/test_utils/testing.py\\r\\n\\r\\nLet me know if there are any other questions you have on this @Chris-hughes10, afterwards I can throw this all up into some documentation :) ',\n", - " 'from_author': False},\n", - " {'body': 'Hey @muellerzr, Thanks for the info!\\r\\n\\r\\nSo, it looks like I was on the right track regarding the structure, but I was running with unittests and not Pytest. I have refactored it to pytest, and it seems to work alright using `CUDA_VISIBLE_DEVICES=\"0,1\" pytest -sv tests/test_multigpu.py, so it is probably ready for an initial review. \\r\\n\\r\\nI noticed that the main integration script seems to be doing a lot, so I created a new script to be super clear on what it is testing. The tests for the context manager and joining can probably live here too when implemented.\\r\\n\\r\\nAs an aside, how do you guys set up your development environment? 
I have set up a dev container, using the accelerate-gpu image, if it is useful, I could push that in another PR.',\n", - " 'from_author': True},\n", - " {'body': \"@Chris-hughes10 I use conda for mine, and the CI's all run based off the docker images as there's hosted computes we use\",\n", - " 'from_author': False},\n", - " {'body': \"> @Chris-hughes10 I use conda for mine, and the CI's all run based off the conda images as there's hosted computes we use\\r\\n\\r\\nOk, if you are using conda envs, I think that this could be useful. I'll create a small PR and you guys can see what you think.\",\n", - " 'from_author': True},\n", - " {'body': 'Hey @muellerzr, are there any further changes required on this?',\n", - " 'from_author': True},\n", - " {'body': \"@Chris-hughes10 close, your tests still weren't passing, and this is because `nproc_per_node` needed to be specified in the test function. (Discussing with Sylvain if this should be *everywhere* or not) but basically without that `Accelerator.num_processes` was only ever equal to one. \\r\\n\\r\\nWent ahead and pushed the fix :)\",\n", - " 'from_author': False},\n", - " {'body': 'Thanks for all your work on this!', 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/781'},\n", - " 1092658402: {'diff': 'diff --git a/docs/source/package_reference/cli.mdx b/docs/source/package_reference/cli.mdx\\nindex a9e955d43..52e8ecfb6 100644\\n--- a/docs/source/package_reference/cli.mdx\\n+++ b/docs/source/package_reference/cli.mdx\\n@@ -95,10 +95,13 @@ The following arguments are useful for customization of worker machines\\n * `--num_machines NUM_MACHINES` (`int`) -- The total number of machines used in this training.\\n * `--num_processes NUM_PROCESSES` (`int`) -- The total number of processes to be launched in parallel.\\n * `--gpu_ids` (`str`) -- What GPUs (by id) should be used for training on this machine as a comma-seperated list\\n+* `--same_network` (`bool`) -- Whether all machines used for multinode training exist on the same local network.\\n * `--main_process_ip MAIN_PROCESS_IP` (`str`) -- The IP address of the machine of rank 0.\\n * `--main_process_port MAIN_PROCESS_PORT` (`int`) -- The port to use to communicate with the machine of rank 0.\\n+* `--rdzv_conf` (`str`) -- Additional rendezvous configuration (=,=,...).\\n * `--num_cpu_threads_per_process NUM_CPU_THREADS_PER_PROCESS` (`int`) -- The number of CPU threads per process. 
Can be tuned for optimal performance.\\n-\\n+* `--max_restarts` (`int`) -- Maximum number of worker group restarts before failing.\\n+* `--monitor_interval` (`float`) -- Interval, in seconds, to monitor the state of workers.\\n \\n **DeepSpeed Arguments**:\\n \\n@@ -125,8 +128,9 @@ The following arguments are only useful when `use_fdsp` is passed:\\n **TPU Arguments**:\\n \\n The following arguments are only useful when `tpu` is passed:\\n-* `--tpu` (`bool`) - Whether or not this should launch a TPU training.\\n+* `--tpu` (`bool`) -- Whether or not this should launch a TPU training.\\n * `--main_training_function MAIN_TRAINING_FUNCTION` (`str`) -- The name of the main function to be executed in your script.\\n+* `--downcast_bf16` (`bool`) -- Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.\\n \\n **AWS SageMaker Arguments**:\\n \\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex 2198bb3f1..b9f6c1089 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -325,6 +325,12 @@ def launch_command_parser(subparsers=None):\\n default=None,\\n help=\"What GPUs (by id) should be used for training on this machine as a comma-seperated list\",\\n )\\n+ parser.add_argument(\\n+ \"--same_network\",\\n+ default=False,\\n+ action=\"store_true\",\\n+ help=\"Whether all machines used for multinode training exist on the same local network.\",\\n+ )\\n parser.add_argument(\\n \"--machine_rank\", type=int, default=None, help=\"The rank of the machine on which this script is launched.\"\\n )\\n@@ -363,7 +369,7 @@ def launch_command_parser(subparsers=None):\\n parser.add_argument(\\n \"--downcast_bf16\",\\n action=\"store_true\",\\n- help=\"Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32\",\\n+ help=\"Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.\",\\n )\\n parser.add_argument(\\n \"-m\",\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/780'},\n", - " 1092437199: {'diff': 'diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex fa7588b0a..29e1b7bad 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -573,7 +573,7 @@ def multi_gpu_launcher(args):\\n try:\\n distrib_run.run(args)\\n except:\\n- if debug:\\n+ if is_rich_available() and debug:\\n console = get_console()\\n console.print(\"\\\\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]\")\\n console.print_exception(suppress=[__file__], show_locals=False)\\n@@ -684,7 +684,7 @@ def deepspeed_launcher(args):\\n try:\\n distrib_run.run(args)\\n except:\\n- if debug:\\n+ if is_rich_available() and debug:\\n console = get_console()\\n console.print(\"\\\\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]\")\\n console.print_exception(suppress=[__file__], show_locals=False)\\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\\nindex 82ea288f0..00ba3ab67 100644\\n--- a/src/accelerate/state.py\\n+++ b/src/accelerate/state.py\\n@@ -14,11 +14,19 @@\\n \\n import os\\n import warnings\\n-from distutils.util 
import strtobool\\n \\n import torch\\n \\n-from .utils import DistributedType, get_ccl_version, is_ccl_available, is_deepspeed_available, is_tpu_available\\n+from .utils import (\\n+ DistributedType,\\n+ get_ccl_version,\\n+ get_int_from_env,\\n+ is_ccl_available,\\n+ is_deepspeed_available,\\n+ is_tpu_available,\\n+ parse_choice_from_env,\\n+ parse_flag_from_env,\\n+)\\n from .utils.dataclasses import SageMakerDistributedType\\n \\n \\n@@ -26,25 +34,6 @@\\n import torch_xla.core.xla_model as xm\\n \\n \\n-def get_int_from_env(env_keys, default):\\n- \"\"\"Returns the first positive env value found in the `env_keys` list or the default.\"\"\"\\n- for e in env_keys:\\n- val = int(os.environ.get(e, -1))\\n- if val >= 0:\\n- return val\\n- return default\\n-\\n-\\n-def parse_flag_from_env(key, default=False):\\n- value = os.environ.get(key, str(default))\\n- return strtobool(value) == 1 # As its name indicates `strtobool` actually returns an int...\\n-\\n-\\n-def parse_choice_from_env(key, default=\"no\"):\\n- value = os.environ.get(key, str(default))\\n- return value\\n-\\n-\\n # Inspired by Alex Martelli\\'s \\'Borg\\'.\\n class AcceleratorState:\\n \"\"\"\\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\\nindex b472ec556..b22518f9a 100644\\n--- a/src/accelerate/utils/__init__.py\\n+++ b/src/accelerate/utils/__init__.py\\n@@ -19,6 +19,7 @@\\n SageMakerDistributedType,\\n TensorInformation,\\n )\\n+from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env\\n from .imports import (\\n get_ccl_version,\\n is_aim_available,\\ndiff --git a/src/accelerate/utils/environment.py b/src/accelerate/utils/environment.py\\nnew file mode 100644\\nindex 000000000..9247bb4fe\\n--- /dev/null\\n+++ b/src/accelerate/utils/environment.py\\n@@ -0,0 +1,36 @@\\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+import os\\n+from distutils.util import strtobool\\n+\\n+\\n+def get_int_from_env(env_keys, default):\\n+ \"\"\"Returns the first positive env value found in the `env_keys` list or the default.\"\"\"\\n+ for e in env_keys:\\n+ val = int(os.environ.get(e, -1))\\n+ if val >= 0:\\n+ return val\\n+ return default\\n+\\n+\\n+def parse_flag_from_env(key, default=False):\\n+ \"\"\"Returns truthy value for `key` from the env if available else the default.\"\"\"\\n+ value = os.environ.get(key, str(default))\\n+ return strtobool(value) == 1 # As its name indicates `strtobool` actually returns an int...\\n+\\n+\\n+def parse_choice_from_env(key, default=\"no\"):\\n+ value = os.environ.get(key, str(default))\\n+ return value\\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\\nindex bdb53988a..c33258a92 100644\\n--- a/src/accelerate/utils/imports.py\\n+++ b/src/accelerate/utils/imports.py\\n@@ -20,6 +20,7 @@\\n \\n from packaging.version import parse\\n \\n+from .environment import parse_flag_from_env\\n from .versions import compare_versions, is_torch_version\\n \\n \\n@@ -126,7 +127,7 @@ def is_boto3_available():\\n \\n \\n def is_rich_available():\\n- return importlib.util.find_spec(\"rich\") is not None\\n+ return (importlib.util.find_spec(\"rich\") is not None) and (not parse_flag_from_env(\"DISABLE_RICH\"))\\n \\n \\n def is_sagemaker_available():\\n',\n", - " 'code_comments': [{'body': \"Let's accept anything that's truthy here? 
Like yes/no, true/false, 0/1\",\n", - " 'diff_hunk': '@@ -126,7 +127,7 @@ def is_boto3_available():\\n \\n \\n def is_rich_available():\\n- return importlib.util.find_spec(\"rich\") is not None\\n+ return (importlib.util.find_spec(\"rich\") is not None) and (not os.environ.get(\"DISABLE_RICH\", \"0\") == \"1\")',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/779'},\n", - " 1092392637: {'diff': 'diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex fa7588b0a..071c86a1d 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -470,7 +470,7 @@ def multi_gpu_launcher(args):\\n setattr(args, \"nproc_per_node\", str(num_processes // num_machines))\\n setattr(args, \"nnodes\", str(num_machines))\\n setattr(args, \"node_rank\", int(args.machine_rank))\\n- if getattr(args, \"same_network\"):\\n+ if getattr(args, \"same_network\", False):\\n setattr(args, \"master_addr\", str(main_process_ip))\\n setattr(args, \"master_port\", str(main_process_port))\\n else:\\n@@ -488,7 +488,7 @@ def multi_gpu_launcher(args):\\n setattr(args, \"no_python\", True)\\n \\n current_env = os.environ.copy()\\n- gpu_ids = getattr(args, \"gpu_ids\")\\n+ gpu_ids = getattr(args, \"gpu_ids\", \"all\")\\n if gpu_ids != \"all\" and args.gpu_ids is not None:\\n current_env[\"CUDA_VISIBLE_DEVICES\"] = gpu_ids\\n mixed_precision = args.mixed_precision.lower()\\n@@ -618,7 +618,7 @@ def deepspeed_launcher(args):\\n setattr(args, \"nproc_per_node\", str(num_processes // num_machines))\\n setattr(args, \"nnodes\", str(num_machines))\\n setattr(args, \"node_rank\", int(args.machine_rank))\\n- if getattr(args, \"same_network\"):\\n+ if getattr(args, \"same_network\", False):\\n setattr(args, \"master_addr\", str(main_process_ip))\\n setattr(args, \"master_port\", str(main_process_port))\\n else:\\n@@ -636,7 +636,7 @@ def deepspeed_launcher(args):\\n setattr(args, \"no_python\", True)\\n \\n current_env = os.environ.copy()\\n- gpu_ids = getattr(args, \"gpu_ids\")\\n+ gpu_ids = getattr(args, \"gpu_ids\", \"all\")\\n if gpu_ids != \"all\" and args.gpu_ids is not None:\\n current_env[\"CUDA_VISIBLE_DEVICES\"] = gpu_ids\\n try:\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/778'},\n", - " 1092225159: {'diff': 'diff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\\nindex 1760e72bc..c14833557 100644\\n--- a/src/accelerate/utils/modeling.py\\n+++ b/src/accelerate/utils/modeling.py\\n@@ -408,8 +408,9 @@ def get_balanced_memory(\\n per_gpu += buffer\\n \\n max_memory = get_max_memory(max_memory)\\n+ last_gpu = max(i for i in max_memory if isinstance(i, int) and max_memory[i] > 0)\\n # The last device is left with max_memory just in case the buffer is not enough.\\n- for i in range(len(max_memory) - 1):\\n+ for i in range(last_gpu):\\n max_memory[i] = min(0 if low_zero and i == 0 else per_gpu, max_memory[i])\\n \\n if low_zero:\\ndiff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py\\nindex 02ab867d7..16243d5e1 100644\\n--- a/tests/test_modeling_utils.py\\n+++ b/tests/test_modeling_utils.py\\n@@ -376,6 +376,10 @@ def 
test_get_balanced_memory(self):\\n max_memory = get_balanced_memory(model, max_memory={0: 300, 1: 500})\\n self.assertDictEqual({0: 215, 1: 500}, max_memory)\\n \\n+ # Last device always get max memory to give more buffer, even if CPU is provided\\n+ max_memory = get_balanced_memory(model, max_memory={0: 300, \"cpu\": 1000})\\n+ self.assertDictEqual({0: 300, \"cpu\": 1000}, max_memory)\\n+\\n # If we set a device to 0, it\\'s not counted.\\n max_memory = get_balanced_memory(model, max_memory={0: 0, 1: 300, 2: 300})\\n self.assertDictEqual({0: 0, 1: 215, 2: 300}, max_memory)\\n',\n", - " 'code_comments': [{'body': '```suggestion\\r\\n offset = 2 if \"cpu\" in (list(max_memory.keys())) else 1\\r\\n for i in range(len(max_memory) - offset):\\r\\n```\\r\\nI think this way it seems that the slow tests of `accelerate` are not broken and we also keep the `transformers` tests pass! Can you confirm this pass the `transformers` tests that were broken @ArthurZucker ? 🙏 ',\n", - " 'diff_hunk': '@@ -409,7 +409,7 @@ def get_balanced_memory(\\n \\n max_memory = get_max_memory(max_memory)\\n # The last device is left with max_memory just in case the buffer is not enough.\\n- for i in range(len(max_memory) - 1):\\n+ for i in range(num_devices - 1):',\n", - " 'from_author': False},\n", - " {'body': 'Will have a look😄 I am also adding the following \\r\\n```python\\r\\nmax_memory = get_balanced_memory(model, max_memory={0: 300, 1: 500, \"cpu\":1000})\\r\\n```',\n", - " 'diff_hunk': '@@ -409,7 +409,7 @@ def get_balanced_memory(\\n \\n max_memory = get_max_memory(max_memory)\\n # The last device is left with max_memory just in case the buffer is not enough.\\n- for i in range(len(max_memory) - 1):\\n+ for i in range(num_devices - 1):',\n", - " 'from_author': True},\n", - " {'body': 'Ahhh great! I was missing this test! Thanks for clarifying 💪 ',\n", - " 'diff_hunk': '@@ -409,7 +409,7 @@ def get_balanced_memory(\\n \\n max_memory = get_max_memory(max_memory)\\n # The last device is left with max_memory just in case the buffer is not enough.\\n- for i in range(len(max_memory) - 1):\\n+ for i in range(num_devices - 1):',\n", - " 'from_author': False},\n", - " {'body': \"This won't work when the last device is GPU 3 but the user forced 0 at GPU 0, since `num_devices` will be 3 (instead of 4) and thus the max memory will be at GPU 2 instead of 3. 
(you can try running the multi-GPU tests and they will fail).\\r\\n\\r\\nAgreed that `len(max_memory)` is not good either, so we should just compute the biggest integer in the keys of `max_memory`:\\r\\n```py\\r\\nlast_gpu = max(i for i in max_memory if isintance(i, int))\\r\\n```\\r\\nthen use this.\\r\\n\\r\\n\",\n", - " 'diff_hunk': '@@ -409,7 +409,7 @@ def get_balanced_memory(\\n \\n max_memory = get_max_memory(max_memory)\\n # The last device is left with max_memory just in case the buffer is not enough.\\n- for i in range(len(max_memory) - 1):\\n+ for i in range(num_devices - 1):',\n", - " 'from_author': False},\n", - " {'body': 'Okay will add this suggestion 💯 ',\n", - " 'diff_hunk': '@@ -409,7 +409,7 @@ def get_balanced_memory(\\n \\n max_memory = get_max_memory(max_memory)\\n # The last device is left with max_memory just in case the buffer is not enough.\\n- for i in range(len(max_memory) - 1):\\n+ for i in range(num_devices - 1):',\n", - " 'from_author': True},\n", - " {'body': 'Oh and we should avoid a GPU that the user set at 0, so actually\\r\\n```\\r\\nlast_gpu = max(i for i in max_memory if isintance(i, int) and max_memory[i] > 0)\\r\\n```',\n", - " 'diff_hunk': '@@ -409,7 +409,7 @@ def get_balanced_memory(\\n \\n max_memory = get_max_memory(max_memory)\\n # The last device is left with max_memory just in case the buffer is not enough.\\n- for i in range(len(max_memory) - 1):\\n+ for i in range(num_devices - 1):',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/777'},\n", - " 1091301397: {'diff': 'diff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\\nindex c4a015156..cdae54af9 100644\\n--- a/src/accelerate/data_loader.py\\n+++ b/src/accelerate/data_loader.py\\n@@ -112,6 +112,9 @@ class BatchSamplerShard(BatchSampler):\\n this argument is set to `False`.\\n - the sampler on process 0 to yield `[0, 1]` then `[4, 5]` and the sampler on process 1 to yield `[2, 3]`\\n then `[6, 7]` if this argument is set to `True`.\\n+ even_batches (`bool`, *optional*, defaults to `True`):\\n+ Whether or not to loop back at the beginning of the sampler when the number of samples is not a round\\n+ multiple of (original batch size / number of processes).\\n \\n \\n \\n@@ -125,6 +128,7 @@ def __init__(\\n num_processes: int = 1,\\n process_index: int = 0,\\n split_batches: bool = False,\\n+ even_batches: bool = True,\\n ):\\n if split_batches and batch_sampler.batch_size % num_processes != 0:\\n raise ValueError(\\n@@ -135,8 +139,11 @@ def __init__(\\n self.num_processes = num_processes\\n self.process_index = process_index\\n self.split_batches = split_batches\\n- self.batch_size = batch_sampler.batch_size\\n- self.drop_last = batch_sampler.drop_last\\n+ self.even_batches = even_batches\\n+ self.batch_size = getattr(batch_sampler, \"batch_size\", None)\\n+ self.drop_last = getattr(batch_sampler, \"drop_last\", False)\\n+ if self.batch_size is None and self.even_batches:\\n+ raise ValueError(\"You need to use `even_batches=False` when the batch sampler has no batch size.\")\\n \\n @property\\n def total_length(self):\\n@@ -144,11 +151,21 @@ def total_length(self):\\n \\n def __len__(self):\\n if self.split_batches:\\n+ # Split batches does not change the length of the batch sampler\\n return len(self.batch_sampler)\\n if len(self.batch_sampler) % self.num_processes == 0:\\n+ # 
If the length is a round multiple of the number of processes, it\\'s easy.\\n return len(self.batch_sampler) // self.num_processes\\n length = len(self.batch_sampler) // self.num_processes\\n- return length if self.drop_last else length + 1\\n+ if self.drop_last:\\n+ # Same if we drop the remainder.\\n+ return length\\n+ elif self.even_batches:\\n+ # When we even batches we always get +1\\n+ return length + 1\\n+ else:\\n+ # Otherwise it depends on the process index.\\n+ return length + 1 if self.process_index < len(self.batch_sampler) % self.num_processes else length\\n \\n def __iter__(self):\\n return self._iter_with_split() if self.split_batches else self._iter_with_no_split()\\n@@ -165,11 +182,15 @@ def _iter_with_split(self):\\n \\n # If drop_last is True of the last batch was full, iteration is over, otherwise...\\n if not self.drop_last and len(initial_data) > 0 and len(batch) < self.batch_size:\\n- # For degenerate cases where the dataset has less than num_process * batch_size samples\\n- while len(initial_data) < self.batch_size:\\n- initial_data += initial_data\\n- batch = batch + initial_data\\n- yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]\\n+ if not self.even_batches:\\n+ if len(batch) > batch_length * self.process_index:\\n+ yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]\\n+ else:\\n+ # For degenerate cases where the dataset has less than num_process * batch_size samples\\n+ while len(initial_data) < self.batch_size:\\n+ initial_data += initial_data\\n+ batch = batch + initial_data\\n+ yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]\\n \\n def _iter_with_no_split(self):\\n initial_data = []\\n@@ -182,35 +203,41 @@ def _iter_with_no_split(self):\\n # yielding it.\\n if idx % self.num_processes == self.process_index:\\n batch_to_yield = batch\\n- if idx % self.num_processes == self.num_processes - 1 and len(batch) == self.batch_size:\\n+ if idx % self.num_processes == self.num_processes - 1 and (\\n+ self.batch_size is None or len(batch) == self.batch_size\\n+ ):\\n yield batch_to_yield\\n batch_to_yield = []\\n \\n # If drop_last is True, iteration is over, otherwise...\\n if not self.drop_last and len(initial_data) > 0:\\n- # ... we yield the complete batch we had saved before if it has the proper length\\n- if len(batch_to_yield) == self.batch_size:\\n- yield batch_to_yield\\n-\\n- # For degenerate cases where the dataset has less than num_process * batch_size samples\\n- while len(initial_data) < self.num_processes * self.batch_size:\\n- initial_data += initial_data\\n-\\n- # If the last batch seen was of the proper size, it has been yielded by its process so we move to the next\\n- if len(batch) == self.batch_size:\\n- batch = []\\n- idx += 1\\n-\\n- # Make sure we yield a multiple of self.num_processes batches\\n- cycle_index = 0\\n- while idx % self.num_processes != 0 or len(batch) > 0:\\n- end_index = cycle_index + self.batch_size - len(batch)\\n- batch += initial_data[cycle_index:end_index]\\n- if idx % self.num_processes == self.process_index:\\n- yield batch\\n- cycle_index = end_index\\n- batch = []\\n- idx += 1\\n+ if not self.even_batches:\\n+ if len(batch_to_yield) > 0:\\n+ yield batch_to_yield\\n+ else:\\n+ # ... 
we yield the complete batch we had saved before if it has the proper length\\n+ if len(batch_to_yield) == self.batch_size:\\n+ yield batch_to_yield\\n+\\n+ # For degenerate cases where the dataset has less than num_process * batch_size samples\\n+ while len(initial_data) < self.num_processes * self.batch_size:\\n+ initial_data += initial_data\\n+\\n+ # If the last batch seen was of the proper size, it has been yielded by its process so we move to the next\\n+ if len(batch) == self.batch_size:\\n+ batch = []\\n+ idx += 1\\n+\\n+ # Make sure we yield a multiple of self.num_processes batches\\n+ cycle_index = 0\\n+ while idx % self.num_processes != 0 or len(batch) > 0:\\n+ end_index = cycle_index + self.batch_size - len(batch)\\n+ batch += initial_data[cycle_index:end_index]\\n+ if idx % self.num_processes == self.process_index:\\n+ yield batch\\n+ cycle_index = end_index\\n+ batch = []\\n+ idx += 1\\n \\n \\n class IterableDatasetShard(IterableDataset):\\ndiff --git a/tests/test_data_loader.py b/tests/test_data_loader.py\\nindex 2d3b6a394..a18dae732 100644\\n--- a/tests/test_data_loader.py\\n+++ b/tests/test_data_loader.py\\n@@ -36,8 +36,11 @@ def __iter__(self):\\n \\n \\n class DataLoaderTester(unittest.TestCase):\\n- def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False):\\n- batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, split_batches) for i in range(2)]\\n+ def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):\\n+ batch_sampler_shards = [\\n+ BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)\\n+ for i in range(2)\\n+ ]\\n batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]\\n if not split_batches:\\n self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])\\n@@ -164,6 +167,137 @@ def test_batch_sampler_shards_with_splits(self):\\n expected = [[], []]\\n self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)\\n \\n+ def test_batch_sampler_shards_with_no_splits_no_even(self):\\n+ # Check the shards when the dataset is a round multiple of total batch size.\\n+ batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)\\n+ expected = [\\n+ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],\\n+ [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],\\n+ ]\\n+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)\\n+\\n+ batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)\\n+ # Expected shouldn\\'t change\\n+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)\\n+\\n+ # Check the shards when the dataset is a round multiple of batch size but not total batch size.\\n+ batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)\\n+ expected = [\\n+ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],\\n+ [[3, 4, 5], [9, 10, 11], [15, 16, 17]],\\n+ ]\\n+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)\\n+\\n+ batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)\\n+ expected = [\\n+ [[0, 1, 2], [6, 7, 8], [12, 13, 14]],\\n+ [[3, 4, 5], [9, 10, 11], [15, 16, 17]],\\n+ ]\\n+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)\\n+\\n+ # Check the shards when the dataset is not a round multiple of batch size but has a multiple of\\n+ # num_processes batch.\\n+ batch_sampler = BatchSampler(range(22), 
batch_size=3, drop_last=False)\\n+ expected = [\\n+ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],\\n+ [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],\\n+ ]\\n+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)\\n+\\n+ batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)\\n+ expected = [\\n+ [[0, 1, 2], [6, 7, 8], [12, 13, 14]],\\n+ [[3, 4, 5], [9, 10, 11], [15, 16, 17]],\\n+ ]\\n+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)\\n+\\n+ # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of\\n+ # num_processes batch.\\n+ batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)\\n+ expected = [\\n+ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],\\n+ [[3, 4, 5], [9, 10, 11], [15, 16, 17]],\\n+ ]\\n+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)\\n+\\n+ batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)\\n+ expected = [\\n+ [[0, 1, 2], [6, 7, 8], [12, 13, 14]],\\n+ [[3, 4, 5], [9, 10, 11], [15, 16, 17]],\\n+ ]\\n+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)\\n+\\n+ # Check the shards when the dataset is very small.\\n+ batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)\\n+ expected = [[[0, 1]], []]\\n+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)\\n+\\n+ batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)\\n+ expected = [[], []]\\n+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)\\n+\\n+ def test_batch_sampler_shards_with_splits_no_even(self):\\n+ # Check the shards when the dataset is a round multiple of batch size.\\n+ batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)\\n+ expected = [\\n+ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],\\n+ [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],\\n+ ]\\n+ self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)\\n+\\n+ batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)\\n+ # Expected shouldn\\'t change\\n+ self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)\\n+\\n+ # Check the shards when the dataset is not a round multiple of batch size.\\n+ batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)\\n+ expected = [\\n+ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],\\n+ [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],\\n+ ]\\n+ self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)\\n+\\n+ batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)\\n+ expected = [\\n+ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],\\n+ [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],\\n+ ]\\n+ self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)\\n+\\n+ # Check the shards when the dataset is not a round multiple of batch size or num_processes.\\n+ batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)\\n+ expected = [\\n+ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],\\n+ [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],\\n+ ]\\n+ self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)\\n+\\n+ batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)\\n+ expected = [\\n+ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],\\n+ [[2, 3], 
[6, 7], [10, 11], [14, 15], [18, 19]],\\n+ ]\\n+ self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)\\n+\\n+ # Check the shards when the dataset is very small.\\n+ batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)\\n+ expected = [[[0, 1]], []]\\n+ self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)\\n+\\n+ batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)\\n+ expected = [[], []]\\n+ self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)\\n+\\n+ def test_batch_sampler_with_varying_batch_size(self):\\n+ batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]\\n+ batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]\\n+\\n+ self.assertEqual(len(batch_sampler_shards[0]), 3)\\n+ self.assertEqual(len(batch_sampler_shards[1]), 2)\\n+\\n+ self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])\\n+ self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])\\n+\\n def check_iterable_dataset_shards(\\n self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False\\n ):\\n',\n", - " 'code_comments': [{'body': '```suggestion\\r\\n Whether or not to loop back at the beginning of the sampler when the number of samples is not a round\\r\\n multiple of (original batch size / number of processes).\\r\\n```',\n", - " 'diff_hunk': '@@ -112,6 +112,9 @@ class BatchSamplerShard(BatchSampler):\\n this argument is set to `False`.\\n - the sampler on process 0 to yield `[0, 1]` then `[4, 5]` and the sampler on process 1 to yield `[2, 3]`\\n then `[6, 7]` if this argument is set to `True`.\\n+ even_batches (`bool`, *optional*, defaults to `True`):\\n+ Whether or not to loop back at the beginning of the sampler when the number of samples is not a round\\n+ multiple of the batch size/number of processes.',\n", - " 'from_author': False},\n", - " {'body': \"`self.batch_size` can't be `None` else this logic would result in errors. \",\n", - " 'diff_hunk': '@@ -182,35 +201,41 @@ def _iter_with_no_split(self):\\n # yielding it.\\n if idx % self.num_processes == self.process_index:\\n batch_to_yield = batch\\n- if idx % self.num_processes == self.num_processes - 1 and len(batch) == self.batch_size:\\n+ if idx % self.num_processes == self.num_processes - 1 and (\\n+ self.batch_size is None or len(batch) == self.batch_size\\n+ ):\\n yield batch_to_yield\\n batch_to_yield = []\\n \\n # If drop_last is True, iteration is over, otherwise...\\n if not self.drop_last and len(initial_data) > 0:\\n- # ... 
we yield the complete batch we had saved before if it has the proper length\\n- if len(batch_to_yield) == self.batch_size:\\n- yield batch_to_yield\\n-\\n- # For degenerate cases where the dataset has less than num_process * batch_size samples\\n- while len(initial_data) < self.num_processes * self.batch_size:\\n- initial_data += initial_data\\n-\\n- # If the last batch seen was of the proper size, it has been yielded by its process so we move to the next\\n- if len(batch) == self.batch_size:\\n- batch = []\\n- idx += 1\\n-\\n- # Make sure we yield a multiple of self.num_processes batches\\n- cycle_index = 0\\n- while idx % self.num_processes != 0 or len(batch) > 0:\\n- end_index = cycle_index + self.batch_size - len(batch)\\n- batch += initial_data[cycle_index:end_index]\\n- if idx % self.num_processes == self.process_index:\\n- yield batch\\n- cycle_index = end_index\\n- batch = []\\n- idx += 1\\n+ if not self.even_batches:\\n+ if len(batch_to_yield) > 0:\\n+ yield batch_to_yield\\n+ else:\\n+ # ... we yield the complete batch we had saved before if it has the proper length\\n+ if len(batch_to_yield) == self.batch_size:\\n+ yield batch_to_yield\\n+\\n+ # For degenerate cases where the dataset has less than num_process * batch_size samples\\n+ while len(initial_data) < self.num_processes * self.batch_size:\\n+ initial_data += initial_data\\n+\\n+ # If the last batch seen was of the proper size, it has been yielded by its process so we move to the next\\n+ if len(batch) == self.batch_size:\\n+ batch = []\\n+ idx += 1\\n+\\n+ # Make sure we yield a multiple of self.num_processes batches\\n+ cycle_index = 0\\n+ while idx % self.num_processes != 0 or len(batch) > 0:\\n+ end_index = cycle_index + self.batch_size - len(batch)\\n+ batch += initial_data[cycle_index:end_index]\\n+ if idx % self.num_processes == self.process_index:\\n+ yield batch\\n+ cycle_index = end_index\\n+ batch = []\\n+ idx += 1',\n", - " 'from_author': False},\n", - " {'body': \"Yes it can't be `None` if `even_batches` is not `False`, will add an early check for that.\",\n", - " 'diff_hunk': '@@ -182,35 +201,41 @@ def _iter_with_no_split(self):\\n # yielding it.\\n if idx % self.num_processes == self.process_index:\\n batch_to_yield = batch\\n- if idx % self.num_processes == self.num_processes - 1 and len(batch) == self.batch_size:\\n+ if idx % self.num_processes == self.num_processes - 1 and (\\n+ self.batch_size is None or len(batch) == self.batch_size\\n+ ):\\n yield batch_to_yield\\n batch_to_yield = []\\n \\n # If drop_last is True, iteration is over, otherwise...\\n if not self.drop_last and len(initial_data) > 0:\\n- # ... 
we yield the complete batch we had saved before if it has the proper length\\n- if len(batch_to_yield) == self.batch_size:\\n- yield batch_to_yield\\n-\\n- # For degenerate cases where the dataset has less than num_process * batch_size samples\\n- while len(initial_data) < self.num_processes * self.batch_size:\\n- initial_data += initial_data\\n-\\n- # If the last batch seen was of the proper size, it has been yielded by its process so we move to the next\\n- if len(batch) == self.batch_size:\\n- batch = []\\n- idx += 1\\n-\\n- # Make sure we yield a multiple of self.num_processes batches\\n- cycle_index = 0\\n- while idx % self.num_processes != 0 or len(batch) > 0:\\n- end_index = cycle_index + self.batch_size - len(batch)\\n- batch += initial_data[cycle_index:end_index]\\n- if idx % self.num_processes == self.process_index:\\n- yield batch\\n- cycle_index = end_index\\n- batch = []\\n- idx += 1\\n+ if not self.even_batches:\\n+ if len(batch_to_yield) > 0:\\n+ yield batch_to_yield\\n+ else:\\n+ # ... we yield the complete batch we had saved before if it has the proper length\\n+ if len(batch_to_yield) == self.batch_size:\\n+ yield batch_to_yield\\n+\\n+ # For degenerate cases where the dataset has less than num_process * batch_size samples\\n+ while len(initial_data) < self.num_processes * self.batch_size:\\n+ initial_data += initial_data\\n+\\n+ # If the last batch seen was of the proper size, it has been yielded by its process so we move to the next\\n+ if len(batch) == self.batch_size:\\n+ batch = []\\n+ idx += 1\\n+\\n+ # Make sure we yield a multiple of self.num_processes batches\\n+ cycle_index = 0\\n+ while idx % self.num_processes != 0 or len(batch) > 0:\\n+ end_index = cycle_index + self.batch_size - len(batch)\\n+ batch += initial_data[cycle_index:end_index]\\n+ if idx % self.num_processes == self.process_index:\\n+ yield batch\\n+ cycle_index = end_index\\n+ batch = []\\n+ idx += 1',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/776'},\n", - " 1091296444: {'diff': 'diff --git a/tests/test_tracking.py b/tests/test_tracking.py\\nindex 5e26eb9b5..d6ff4908a 100644\\n--- a/tests/test_tracking.py\\n+++ b/tests/test_tracking.py\\n@@ -17,6 +17,7 @@\\n import logging\\n import os\\n import re\\n+import subprocess\\n import tempfile\\n import unittest\\n import zipfile\\n@@ -85,59 +86,61 @@ def setUp(self):\\n self.add_mocks(mock.patch.dict(os.environ, {\"WANDB_DIR\": self.tmpdir}))\\n \\n @staticmethod\\n- def get_value_from_log(key: str, log: str, key_occurrence: int = 0):\\n+ def parse_log(log: str, section: str, record: bool = True):\\n \"\"\"\\n- Parses wandb log for `key` and returns the value.\\n- If parsing through multiple calls to .log, pass in a `key_occurrence`\\n+ Parses wandb log for `section` and returns a dictionary of\\n+ all items in that section. 
Section names are based on the\\n+ output of `wandb sync --view --verbose` and items starting\\n+ with \"Record\" in that result\\n \"\"\"\\n- res = re.findall(rf\"(?<={key} )[^\\\\s]+\", log)[key_occurrence]\\n- if \\'\"\\' in res:\\n- return re.findall(r\\'\"([^\"]*)\"\\', res)[0]\\n+ # Big thanks to the W&B team for helping us parse their logs\\n+ pattern = rf\"{section} ([\\\\S\\\\s]*?)\\\\n\\\\n\"\\n+ if record:\\n+ pattern = rf\"Record: {pattern}\"\\n+ cleaned_record = re.findall(pattern, log)[0]\\n+ # A config\\n+ if section == \"config\" or section == \"history\":\\n+ cleaned_record = re.findall(r\\'\"([a-zA-Z0-9_.,]+)\\', cleaned_record)\\n+ return {key: val for key, val in zip(cleaned_record[0::2], cleaned_record[1::2])}\\n+ # Everything else\\n else:\\n- return res\\n+ return dict(re.findall(r\\'(\\\\w+): \"([^\\\\s]+)\"\\', cleaned_record))\\n \\n- def test_init_trackers(self):\\n+ def test_wandb(self):\\n project_name = \"test_project_with_config\"\\n accelerator = Accelerator(log_with=\"wandb\")\\n config = {\"num_iterations\": 12, \"learning_rate\": 1e-2, \"some_boolean\": False, \"some_string\": \"some_value\"}\\n kwargs = {\"wandb\": {\"tags\": [\"my_tag\"]}}\\n accelerator.init_trackers(project_name, config, kwargs)\\n- accelerator.end_training()\\n- # The latest offline log is stored at wandb/latest-run/*.wandb\\n- for child in Path(f\"{self.tmpdir}/wandb/latest-run\").glob(\"*\"):\\n- logger.info(child)\\n- if child.is_file() and child.suffix == \".wandb\":\\n- with open(child, \"rb\") as f:\\n- content = f.read()\\n- break\\n-\\n- # Check HPS through careful parsing and cleaning\\n- cleaned_log = re.sub(r\"[\\\\x00-\\\\x1f]+\", \" \", content.decode(\"utf8\", \"ignore\"))\\n- self.assertEqual(self.get_value_from_log(\"num_iterations\", cleaned_log), \"12\")\\n- self.assertEqual(self.get_value_from_log(\"learning_rate\", cleaned_log), \"0.01\")\\n- self.assertEqual(self.get_value_from_log(\"some_boolean\", cleaned_log), \"false\")\\n- self.assertEqual(self.get_value_from_log(\"some_string\", cleaned_log), \"some_value\")\\n- self.assertIn(\"my_tag\", cleaned_log)\\n-\\n- def test_log(self):\\n- project_name = \"test_project_with_log\"\\n- accelerator = Accelerator(log_with=\"wandb\")\\n- accelerator.init_trackers(project_name)\\n values = {\"total_loss\": 0.1, \"iteration\": 1, \"my_text\": \"some_value\"}\\n accelerator.log(values, step=0)\\n accelerator.end_training()\\n # The latest offline log is stored at wandb/latest-run/*.wandb\\n for child in Path(f\"{self.tmpdir}/wandb/latest-run\").glob(\"*\"):\\n if child.is_file() and child.suffix == \".wandb\":\\n- with open(child, \"rb\") as f:\\n- content = f.read()\\n+ content = subprocess.check_output(\\n+ [\"wandb\", \"sync\", \"--view\", \"--verbose\", str(child)], env=os.environ.copy()\\n+ ).decode(\"utf8\", \"ignore\")\\n break\\n+\\n # Check HPS through careful parsing and cleaning\\n- cleaned_log = re.sub(r\"[\\\\x00-\\\\x1f]+\", \" \", content.decode(\"utf8\", \"ignore\"))\\n- self.assertTrue(\"0.1\" in self.get_value_from_log(\"total_loss\", cleaned_log))\\n- self.assertTrue(\"1\" in self.get_value_from_log(\"iteration\", cleaned_log))\\n- self.assertTrue(\"some_value\" in self.get_value_from_log(\"my_text\", cleaned_log))\\n- self.assertTrue(\"0\" in self.get_value_from_log(\"_step\", cleaned_log))\\n+ logged_items = self.parse_log(content, \"config\")\\n+ self.assertEqual(logged_items[\"num_iterations\"], \"12\")\\n+ self.assertEqual(logged_items[\"learning_rate\"], \"0.01\")\\n+ 
self.assertEqual(logged_items[\"some_boolean\"], \"false\")\\n+ self.assertEqual(logged_items[\"some_string\"], \"some_value\")\\n+ self.assertEqual(logged_items[\"some_string\"], \"some_value\")\\n+\\n+ # Run tags\\n+ logged_items = self.parse_log(content, \"run\", False)\\n+ self.assertEqual(logged_items[\"tags\"], \"my_tag\")\\n+\\n+ # Actual logging\\n+ logged_items = self.parse_log(content, \"history\")\\n+ self.assertEqual(logged_items[\"total_loss\"], \"0.1\")\\n+ self.assertEqual(logged_items[\"iteration\"], \"1\")\\n+ self.assertEqual(logged_items[\"my_text\"], \"some_value\")\\n+ self.assertEqual(logged_items[\"_step\"], \"0\")\\n \\n \\n # Comet has a special `OfflineExperiment` we need to use for testing\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/775'},\n", - " 1090989496: {'diff': 'diff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\\nindex 08ee3a65a..1760e72bc 100644\\n--- a/src/accelerate/utils/modeling.py\\n+++ b/src/accelerate/utils/modeling.py\\n@@ -365,7 +365,7 @@ def get_balanced_memory(\\n if not torch.cuda.is_available():\\n return max_memory\\n \\n- num_devices = len([d for d in max_memory if torch.device(d).type == \"cuda\"])\\n+ num_devices = len([d for d in max_memory if torch.device(d).type == \"cuda\" and max_memory[d] > 0])\\n module_sizes = compute_module_sizes(model, dtype=dtype)\\n per_gpu = module_sizes[\"\"] // (num_devices - 1 if low_zero else num_devices)\\n \\n@@ -409,7 +409,7 @@ def get_balanced_memory(\\n \\n max_memory = get_max_memory(max_memory)\\n # The last device is left with max_memory just in case the buffer is not enough.\\n- for i in range(num_devices - 1):\\n+ for i in range(len(max_memory) - 1):\\n max_memory[i] = min(0 if low_zero and i == 0 else per_gpu, max_memory[i])\\n \\n if low_zero:\\ndiff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py\\nindex 1c6f6088d..02ab867d7 100644\\n--- a/tests/test_modeling_utils.py\\n+++ b/tests/test_modeling_utils.py\\n@@ -375,3 +375,7 @@ def test_get_balanced_memory(self):\\n # Last device always get max memory to give more buffer and avoid accidental CPU offload\\n max_memory = get_balanced_memory(model, max_memory={0: 300, 1: 500})\\n self.assertDictEqual({0: 215, 1: 500}, max_memory)\\n+\\n+ # If we set a device to 0, it\\'s not counted.\\n+ max_memory = get_balanced_memory(model, max_memory={0: 0, 1: 300, 2: 300})\\n+ self.assertDictEqual({0: 0, 1: 215, 2: 300}, max_memory)\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/774'},\n", - " 1090961915: {'diff': 'diff --git a/.github/workflows/build-docker-images-release.yml b/.github/workflows/build-docker-images-release.yml\\nindex 654259f87..be0c76c48 100644\\n--- a/.github/workflows/build-docker-images-release.yml\\n+++ b/.github/workflows/build-docker-images-release.yml\\n@@ -17,7 +17,7 @@ jobs:\\n steps:\\n - uses: actions/checkout@v3\\n - id: step1\\n- run: echo \"::set-output name=version::$(python setup.py --version)\"\\n+ run: echo \"version=$(python setup.py --version)\" >> $GITHUB_OUTPUT\\n \\n version-cpu:\\n name: \"Latest Accelerate CPU [version]\"\\ndiff --git 
a/.github/workflows/build_and_run_tests.yml b/.github/workflows/build_and_run_tests.yml\\nindex abf9f3883..68e11c44a 100644\\n--- a/.github/workflows/build_and_run_tests.yml\\n+++ b/.github/workflows/build_and_run_tests.yml\\n@@ -16,7 +16,7 @@ jobs:\\n outputs:\\n changed: ${{ steps.was_changed.outputs.changed }}\\n steps:\\n- - uses: actions/checkout@v3\\n+ - uses: actions/checkout@v3.1.0\\n with: \\n fetch-depth: \"2\"\\n \\n@@ -29,7 +29,7 @@ jobs:\\n run: |\\n for file in ${{ steps.changed-files.outputs.all_changed_files }}; do\\n if [ `basename \"${file}\"` == \"setup.py\" ]; then\\n- echo ::set-output name=changed::\"1\"\\n+ echo \"changed=1\" >> $GITHUB_OUTPUT\\n fi\\n done\\n \\ndiff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\\nindex 2959316d8..691397b0d 100644\\n--- a/.github/workflows/test.yml\\n+++ b/.github/workflows/test.yml\\n@@ -38,7 +38,7 @@ jobs:\\n test_rest\\n ]\\n steps:\\n- - uses: actions/checkout@v3\\n+ - uses: actions/checkout@v3.1.0\\n - name: Set up python 3.7\\n uses: actions/setup-python@v3\\n with:\\n@@ -50,7 +50,7 @@ jobs:\\n path: |\\n ${{ env.pythonLocation }}\\n ${{ env.HF_HOME }}\\n- key: ${{ env.pythonLocation }}-${{ matrix.test-kind }}-${{ hashFiles(\\'setup.py\\') }}\\n+ key: ${{ env.pythonLocation }}-${{ matrix.pytorch-version }}-${{ matrix.test-kind }}-${{ hashFiles(\\'setup.py\\') }}\\n \\n - name: Install the library\\n run: |\\n@@ -62,6 +62,8 @@ jobs:\\n pip install pytest-reportlog\\n \\n - name: Run Tests\\n+ env: \\n+ PYTORCH_VERSION: ${{ matrix.pytorch-version }}\\n run: |\\n make ${{ matrix.test-kind }}\\n \\ndiff --git a/Makefile b/Makefile\\nindex 558e98347..9aaecb076 100644\\n--- a/Makefile\\n+++ b/Makefile\\n@@ -25,43 +25,43 @@ style:\\n \\t\\n # Run tests for the library\\n test:\\n-\\tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log \\'all.log\\',)\\n+\\tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log \"$(PYTORCH_VERSION)_all.log\",)\\n \\n test_big_modeling:\\n-\\tpython -m pytest -s -v ./tests/test_big_modeling.py $(if $(IS_GITHUB_CI),--report-log \\'big_modeling.log\\',)\\n+\\tpython -m pytest -s -v ./tests/test_big_modeling.py $(if $(IS_GITHUB_CI),--report-log \"$(PYTORCH_VERSION)_big_modeling.log\",)\\n \\n test_core:\\n \\tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py --ignore=./tests/deepspeed --ignore=./tests/test_big_modeling.py \\\\\\n-\\t--ignore=./tests/fsdp --ignore=./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log \\'core.log\\',)\\n+\\t--ignore=./tests/fsdp --ignore=./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log \"$(PYTORCH_VERSION)_core.log\",)\\n \\n test_cli:\\n-\\tpython -m pytest -s -v ./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log \\'cli.log\\',)\\n+\\tpython -m pytest -s -v ./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log \"$(PYTORCH_VERSION)_cli.log\",)\\n \\n test_deepspeed:\\n-\\tpython -m pytest -s -v ./tests/deepspeed $(if $(IS_GITHUB_CI),--report-log \\'deepspeed.log\\',)\\n+\\tpython -m pytest -s -v ./tests/deepspeed $(if $(IS_GITHUB_CI),--report-log \"$(PYTORCH_VERSION)_deepspeed.log\",)\\n \\n test_fsdp:\\n-\\tpython -m pytest -s -v ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log \\'fsdp.log\\',)\\n+\\tpython -m pytest -s -v ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log \"$(PYTORCH_VERSION)_fsdp.log\",)\\n \\n test_examples:\\n-\\tpython -m pytest -s -v ./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log \\'examples.log\\',)\\n+\\tpython 
-m pytest -s -v ./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log \"$(PYTORCH_VERSION)_examples.log\",)\\n \\n # Broken down example tests for the CI runners\\n test_integrations:\\n-\\tpython -m pytest -s -v ./tests/deepspeed ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log \\'integrations.log\\',)\\n+\\tpython -m pytest -s -v ./tests/deepspeed ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log \"$(PYTORCH_VERSION)_integrations.log\",)\\n \\n test_example_differences:\\n-\\tpython -m pytest -s -v ./tests/test_examples.py::ExampleDifferenceTests $(if $(IS_GITHUB_CI),--report-log \\'example_diff.log\\',)\\n+\\tpython -m pytest -s -v ./tests/test_examples.py::ExampleDifferenceTests $(if $(IS_GITHUB_CI),--report-log \"$(PYTORCH_VERSION)_example_diff.log\",)\\n \\n test_checkpoint_epoch:\\n-\\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_epoch\" $(if $(IS_GITHUB_CI),--report-log \\'checkpoint_epoch.log\\',)\\n+\\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_epoch\" $(if $(IS_GITHUB_CI),--report-log \"$(PYTORCH_VERSION)_checkpoint_epoch.log\",)\\n \\n test_checkpoint_step:\\n-\\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_step\" $(if $(IS_GITHUB_CI),--report-log \\'checkpoint_step.log\\',)\\n+\\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_step\" $(if $(IS_GITHUB_CI),--report-log \"$(PYTORCH_VERSION)_checkpoint_step.log\",)\\n \\n # Same as test but used to install only the base dependencies\\n test_prod:\\n \\t$(MAKE) test_core\\n \\n test_rest:\\n-\\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"not by_step and not by_epoch\" $(if $(IS_GITHUB_CI),--report-log \\'rest.log\\',)\\n+\\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"not by_step and not by_epoch\" $(if $(IS_GITHUB_CI),--report-log \"$(PYTORCH_VERSION)_rest.log\",)\\ndiff --git a/utils/log_reports.py b/utils/log_reports.py\\nindex f701f08c2..7f790dad3 100644\\n--- a/utils/log_reports.py\\n+++ b/utils/log_reports.py\\n@@ -1,6 +1,5 @@\\n import json\\n-from pathlib import Path \\n-import subprocess\\n+from pathlib import Path\\n \\n failed = []\\n passed = []\\n@@ -19,16 +18,16 @@\\n duration = f\\'{line[\"duration\"]:.4f}\\'\\n if line.get(\"outcome\", \"\") == \"failed\":\\n section_num_failed += 1\\n- failed.append([test, duration])\\n+ failed.append([test, duration, log.name.split(\\'_\\')[0]])\\n else:\\n- passed.append([test, duration])\\n+ passed.append([test, duration, log.name.split(\\'_\\')[0]])\\n group_info.append([str(log), section_num_failed])\\n \\n if len(failed) > 0:\\n result = \"## Failed Tests:\\\\n\"\\n- failed_table = \\'| Test Location | Test Class | Test Name |\\\\n|---|---|---|\\\\n| \\'\\n+ failed_table = \\'| Test Location | Test Class | Test Name | PyTorch Version |\\\\n|---|---|---|---|\\\\n| \\'\\n for test in failed:\\n failed_table += \\' | \\'.join(test[0].split(\"::\"))\\n- failed_table += \" |\"\\n+ failed_table += f\" | {test[2]} |\"\\n result += failed_table\\n print(result)\\n\\\\ No newline at end of file\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'Thank you @muellerzr for updating my knowledge!',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/773'},\n", - " 1090925271: {'diff': 'diff --git 
a/.github/workflows/run_merge_tests.yml b/.github/workflows/run_merge_tests.yml\\nindex a794cd72b..fcbb62f53 100644\\n--- a/.github/workflows/run_merge_tests.yml\\n+++ b/.github/workflows/run_merge_tests.yml\\n@@ -29,6 +29,11 @@ jobs:\\n pip install -e .[testing,test_trackers]\\n pip install pytest-reportlog\\n \\n+ - name: Run CLI tests\\n+ run: |\\n+ source activate accelerate\\n+ make test_cli\\n+ \\n - name: Run test on GPUs\\n run: |\\n source activate accelerate\\n@@ -62,6 +67,11 @@ jobs:\\n pip install -e .[testing,test_trackers]\\n pip install pytest-reportlog\\n \\n+ - name: Run CLI tests\\n+ run: |\\n+ source activate accelerate\\n+ make test_cli\\n+\\n - name: Run test on GPUs\\n run: |\\n source activate accelerate\\ndiff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\\nindex 593eec741..2959316d8 100644\\n--- a/.github/workflows/test.yml\\n+++ b/.github/workflows/test.yml\\n@@ -28,6 +28,7 @@ jobs:\\n test-kind: [\\n test_prod,\\n test_core,\\n+ test_cli,\\n test_big_modeling,\\n test_deepspeed,\\n test_fsdp,\\ndiff --git a/Makefile b/Makefile\\nindex 52c594ae7..558e98347 100644\\n--- a/Makefile\\n+++ b/Makefile\\n@@ -32,7 +32,10 @@ test_big_modeling:\\n \\n test_core:\\n \\tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py --ignore=./tests/deepspeed --ignore=./tests/test_big_modeling.py \\\\\\n-\\t--ignore=./tests/fsdp $(if $(IS_GITHUB_CI),--report-log \\'core.log\\',)\\n+\\t--ignore=./tests/fsdp --ignore=./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log \\'core.log\\',)\\n+\\n+test_cli:\\n+\\tpython -m pytest -s -v ./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log \\'cli.log\\',)\\n \\n test_deepspeed:\\n \\tpython -m pytest -s -v ./tests/deepspeed $(if $(IS_GITHUB_CI),--report-log \\'deepspeed.log\\',)\\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex 65e9be9c7..fa7588b0a 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -432,7 +432,7 @@ def simple_launcher(args):\\n current_env[\"USE_MPS_DEVICE\"] = str(args.use_mps_device)\\n if args.use_mps_device:\\n current_env[\"PYTORCH_ENABLE_MPS_FALLBACK\"] = \"1\"\\n- elif args.gpu_ids != \"all\":\\n+ elif args.gpu_ids != \"all\" and args.gpu_ids is not None:\\n current_env[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_ids\\n if args.num_machines > 1:\\n current_env[\"MASTER_ADDR\"] = args.main_process_ip\\n@@ -489,7 +489,7 @@ def multi_gpu_launcher(args):\\n \\n current_env = os.environ.copy()\\n gpu_ids = getattr(args, \"gpu_ids\")\\n- if gpu_ids != \"all\":\\n+ if gpu_ids != \"all\" and args.gpu_ids is not None:\\n current_env[\"CUDA_VISIBLE_DEVICES\"] = gpu_ids\\n mixed_precision = args.mixed_precision.lower()\\n try:\\n@@ -637,7 +637,7 @@ def deepspeed_launcher(args):\\n \\n current_env = os.environ.copy()\\n gpu_ids = getattr(args, \"gpu_ids\")\\n- if gpu_ids != \"all\":\\n+ if gpu_ids != \"all\" and args.gpu_ids is not None:\\n current_env[\"CUDA_VISIBLE_DEVICES\"] = gpu_ids\\n try:\\n mixed_precision = PrecisionType(args.mixed_precision.lower())\\n@@ -947,7 +947,7 @@ def launch_command(args):\\n else:\\n if args.num_processes is None:\\n args.num_processes = torch.cuda.device_count() if args.multi_gpu else 1\\n- warned.append(\"\\\\t`--num_processes` was set to a value of `{args.num_processes}`\")\\n+ warned.append(f\"\\\\t`--num_processes` was set to a value of `{args.num_processes}`\")\\n if args.num_machines is None:\\n warned.append(\"\\\\t`--num_machines` was set to a value of `1`\")\\n 
args.num_machines = 1\\ndiff --git a/src/accelerate/test_utils/scripts/test_cli.py b/src/accelerate/test_utils/scripts/test_cli.py\\nnew file mode 100644\\nindex 000000000..491410e5f\\n--- /dev/null\\n+++ b/src/accelerate/test_utils/scripts/test_cli.py\\n@@ -0,0 +1,13 @@\\n+import torch\\n+\\n+\\n+def main():\\n+ if torch.cuda.is_available():\\n+ num_gpus = torch.cuda.device_count()\\n+ else:\\n+ num_gpus = 0\\n+ print(f\"Successfully ran on {num_gpus} GPUs\")\\n+\\n+\\n+if __name__ == \"__main__\":\\n+ main()\\ndiff --git a/tests/test_cli.py b/tests/test_cli.py\\nnew file mode 100644\\nindex 000000000..ceed58290\\n--- /dev/null\\n+++ b/tests/test_cli.py\\n@@ -0,0 +1,65 @@\\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+import inspect\\n+import os\\n+import unittest\\n+from pathlib import Path\\n+\\n+import torch\\n+\\n+import accelerate\\n+from accelerate.test_utils import execute_subprocess_async\\n+\\n+\\n+class AccelerateLauncherTester(unittest.TestCase):\\n+ \"\"\"\\n+ Test case for verifying the `accelerate launch` CLI operates correctly.\\n+ If a `default_config.yaml` file is located in the cache it will temporarily move it\\n+ for the duration of the tests.\\n+ \"\"\"\\n+\\n+ mod_file = inspect.getfile(accelerate.test_utils)\\n+ test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"scripts\", \"test_cli.py\"])\\n+\\n+ base_cmd = [\"accelerate\", \"launch\"]\\n+ config_folder = Path.home() / \".cache/huggingface/accelerate\"\\n+ config_file = \"default_config.yaml\"\\n+ config_path = config_folder / config_file\\n+ changed_path = config_folder / \"_default_config.yaml\"\\n+\\n+ test_config_path = Path(\"tests/test_configs\")\\n+\\n+ @classmethod\\n+ def setUpClass(cls):\\n+ if cls.config_path.is_file():\\n+ cls.config_path.rename(cls.changed_path)\\n+\\n+ @classmethod\\n+ def tearDownClass(cls):\\n+ if cls.changed_path.is_file():\\n+ cls.changed_path.rename(cls.config_path)\\n+\\n+ def test_no_config(self):\\n+ cmd = self.base_cmd\\n+ if torch.cuda.is_available() and (torch.cuda.device_count() > 1):\\n+ cmd += [\"--multi_gpu\"]\\n+ execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())\\n+\\n+ def test_config_compatibility(self):\\n+ for config in sorted(self.test_config_path.glob(\"**/*.yaml\")):\\n+ with self.subTest(config_file=config):\\n+ execute_subprocess_async(\\n+ self.base_cmd + [\"--config_file\", str(config), self.test_file_path], env=os.environ.copy()\\n+ )\\ndiff --git a/tests/test_configs/0_11_0.yaml b/tests/test_configs/0_11_0.yaml\\nnew file mode 100644\\nindex 000000000..9ef829e6b\\n--- /dev/null\\n+++ b/tests/test_configs/0_11_0.yaml\\n@@ -0,0 +1,12 @@\\n+compute_environment: LOCAL_MACHINE\\n+deepspeed_config: {}\\n+distributed_type: \\'NO\\'\\n+fsdp_config: {}\\n+machine_rank: 0\\n+main_process_ip: null\\n+main_process_port: null\\n+main_training_function: main\\n+mixed_precision: \\'no\\'\\n+num_machines: 1\\n+num_processes: 
1\\n+use_cpu: false\\n\\\\ No newline at end of file\\ndiff --git a/tests/test_configs/0_12_0.yaml b/tests/test_configs/0_12_0.yaml\\nnew file mode 100644\\nindex 000000000..00d06aab9\\n--- /dev/null\\n+++ b/tests/test_configs/0_12_0.yaml\\n@@ -0,0 +1,13 @@\\n+compute_environment: LOCAL_MACHINE\\n+deepspeed_config: {}\\n+distributed_type: \\'NO\\'\\n+downcast_bf16: \\'no\\'\\n+fsdp_config: {}\\n+machine_rank: 0\\n+main_process_ip: null\\n+main_process_port: null\\n+main_training_function: main\\n+mixed_precision: \\'no\\'\\n+num_machines: 1\\n+num_processes: 1\\n+use_cpu: false\\n\\\\ No newline at end of file\\ndiff --git a/tests/test_configs/README.md b/tests/test_configs/README.md\\nnew file mode 100644\\nindex 000000000..fd88d066c\\n--- /dev/null\\n+++ b/tests/test_configs/README.md\\n@@ -0,0 +1,2 @@\\n+This folder contains test configs for `accelerate config`. These should be generated for each major version\\n+and are written based on `accelerate config` and selecting the \"No distributed training\" option.\\n\\\\ No newline at end of file\\ndiff --git a/tests/test_configs/latest.yaml b/tests/test_configs/latest.yaml\\nnew file mode 100644\\nindex 000000000..87b294cf8\\n--- /dev/null\\n+++ b/tests/test_configs/latest.yaml\\n@@ -0,0 +1,17 @@\\n+compute_environment: LOCAL_MACHINE\\n+deepspeed_config: {}\\n+distributed_type: \\'NO\\'\\n+downcast_bf16: \\'no\\'\\n+fsdp_config: {}\\n+gpu_ids: all\\n+machine_rank: 0\\n+main_process_ip: null\\n+main_process_port: null\\n+main_training_function: main\\n+megatron_lm_config: {}\\n+mixed_precision: \\'no\\'\\n+num_machines: 1\\n+num_processes: 1\\n+rdzv_backend: static\\n+same_network: true\\n+use_cpu: false\\n',\n", - " 'code_comments': [{'body': 'This addition is needed for the test case of `CUDA_VISIBLE_DEVICES=\"\"`',\n", - " 'diff_hunk': '@@ -489,7 +489,7 @@ def multi_gpu_launcher(args):\\n \\n current_env = os.environ.copy()\\n gpu_ids = getattr(args, \"gpu_ids\")\\n- if gpu_ids != \"all\":\\n+ if gpu_ids != \"all\" and args.gpu_ids is not None:',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/772'},\n", - " 1090006484: {'diff': 'diff --git a/src/accelerate/big_modeling.py b/src/accelerate/big_modeling.py\\nindex 9d57c144f..83c6738af 100644\\n--- a/src/accelerate/big_modeling.py\\n+++ b/src/accelerate/big_modeling.py\\n@@ -143,6 +143,8 @@ def cpu_offload(\\n execution_device = next(iter(model.parameters())).device\\n if state_dict is None:\\n state_dict = {n: p.to(\"cpu\") for n, p in model.state_dict().items()}\\n+\\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)\\n attach_align_device_hook(\\n model,\\n execution_device=execution_device,\\n@@ -151,7 +153,7 @@ def cpu_offload(\\n weights_map=state_dict,\\n preload_module_classes=preload_module_classes,\\n )\\n- add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\\n+\\n return model\\n \\n \\n@@ -189,6 +191,8 @@ def disk_offload(\\n if execution_device is None:\\n execution_device = next(iter(model.parameters())).device\\n weights_map = OffloadedWeightsLoader(save_folder=offload_dir)\\n+\\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)\\n attach_align_device_hook(\\n model,\\n execution_device=execution_device,\\n@@ -197,7 +201,7 @@ def disk_offload(\\n weights_map=weights_map,\\n 
preload_module_classes=preload_module_classes,\\n )\\n- add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\\n+\\n return model\\n \\n \\ndiff --git a/src/accelerate/hooks.py b/src/accelerate/hooks.py\\nindex 493444daa..120bb7f42 100644\\n--- a/src/accelerate/hooks.py\\n+++ b/src/accelerate/hooks.py\\n@@ -108,26 +108,34 @@ def detach_hook(self, module):\\n return module\\n \\n \\n-def add_hook_to_module(module: nn.Module, hook: ModelHook):\\n+def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False):\\n \"\"\"\\n Adds a hook to a given module. This will rewrite the `forward` method of the module to include the hook, to remove\\n this behavior and restore the original `forward` method, use `remove_hook_from_module`.\\n \\n \\n \\n- If the module already contains a hook, this will replace it with the new hook passed. To chain two hooks together,\\n- use the `SequentialHook` class.\\n+ If the module already contains a hook, this will replace it with the new hook passed by default. To chain two hooks\\n+ together, pass `append=True`, so it chains the current and new hook into an instance of the `SequentialHook` class.\\n \\n \\n \\n Args:\\n module (`torch.nn.Module`): The module to attach a hook to.\\n hook (`ModelHook`): The hook to attach.\\n+ append (`bool`, *optional*, defaults to `False`):\\n+ Whether the hook should be chained with an existing one (if module already contains a hook) or not.\\n \\n Returns:\\n `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can\\n be discarded).\\n \"\"\"\\n+\\n+ if append and (getattr(module, \"_hf_hook\", None) is not None):\\n+ old_hook = module._hf_hook\\n+ remove_hook_from_module(module)\\n+ hook = SequentialHook(old_hook, hook)\\n+\\n if hasattr(module, \"_hf_hook\") and hasattr(module, \"_old_forward\"):\\n # If we already put some hook on this module, we replace it with the new one.\\n old_forward = module._old_forward\\n@@ -352,7 +360,7 @@ def attach_align_device_hook(\\n offload_buffers=offload_buffers,\\n place_submodules=full_offload,\\n )\\n- add_hook_to_module(module, hook)\\n+ add_hook_to_module(module, hook, append=True)\\n \\n # We stop the recursion in case we hit the full offload.\\n if full_offload:\\ndiff --git a/tests/test_hooks.py b/tests/test_hooks.py\\nindex 9d48db9e1..e3824809c 100644\\n--- a/tests/test_hooks.py\\n+++ b/tests/test_hooks.py\\n@@ -69,6 +69,25 @@ def test_add_and_remove_hooks(self):\\n self.assertFalse(hasattr(test_model, \"_hf_hook\"))\\n self.assertFalse(hasattr(test_model, \"_old_forward\"))\\n \\n+ def test_append_and_remove_hooks(self):\\n+ test_model = ModelForTest()\\n+ test_hook = ModelHook()\\n+\\n+ add_hook_to_module(test_model, test_hook)\\n+ add_hook_to_module(test_model, test_hook, append=True)\\n+\\n+ self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)\\n+ self.assertEqual(len(test_model._hf_hook.hooks), 2)\\n+ self.assertTrue(hasattr(test_model, \"_old_forward\"))\\n+\\n+ # Check adding the hook did not change the name or the signature\\n+ self.assertEqual(test_model.forward.__name__, \"forward\")\\n+ self.assertListEqual(list(inspect.signature(test_model.forward).parameters), [\"x\"])\\n+\\n+ remove_hook_from_module(test_model)\\n+ self.assertFalse(hasattr(test_model, \"_hf_hook\"))\\n+ self.assertFalse(hasattr(test_model, \"_old_forward\"))\\n+\\n def test_pre_forward_hook_is_executed(self):\\n test_model = ModelForTest()\\n x = torch.randn(2, 3)\\n',\n", - " 'code_comments': [{'body': 
\"```suggestion\\r\\n append (`bool`, *optional*, defaults to `False`):\\r\\n Whether the hook should be chained with an existing one (if module already contains a hook) or not.\\r\\n```\\r\\n\\r\\nAlso can't comment on line 119 but this should also be mentioned in the Note.\",\n", - " 'diff_hunk': '@@ -123,11 +123,21 @@ def add_hook_to_module(module: nn.Module, hook: ModelHook):\\n Args:\\n module (`torch.nn.Module`): The module to attach a hook to.\\n hook (`ModelHook`): The hook to attach.\\n+ append (`bool`): Whether, if module already contains a hook, should chain the new one with SequentialHook',\n", - " 'from_author': False},\n", - " {'body': 'Those two ifs can be grouped.\\r\\nAlso the second test can be simplified in `getattr(module, \"_hf_hook\", None) is not None`',\n", - " 'diff_hunk': '@@ -123,11 +123,21 @@ def add_hook_to_module(module: nn.Module, hook: ModelHook):\\n Args:\\n module (`torch.nn.Module`): The module to attach a hook to.\\n hook (`ModelHook`): The hook to attach.\\n+ append (`bool`): Whether, if module already contains a hook, should chain the new one with SequentialHook\\n \\n Returns:\\n `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can\\n be discarded).\\n \"\"\"\\n+\\n+ if append:\\n+ if hasattr(module, \"_hf_hook\") and (module._hf_hook is not None):',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\n hook = SequentialHook(old_hook, hook)\\r\\n```',\n", - " 'diff_hunk': '@@ -123,11 +123,21 @@ def add_hook_to_module(module: nn.Module, hook: ModelHook):\\n Args:\\n module (`torch.nn.Module`): The module to attach a hook to.\\n hook (`ModelHook`): The hook to attach.\\n+ append (`bool`): Whether, if module already contains a hook, should chain the new one with SequentialHook\\n \\n Returns:\\n `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can\\n be discarded).\\n \"\"\"\\n+\\n+ if append:\\n+ if hasattr(module, \"_hf_hook\") and (module._hf_hook is not None):\\n+ old_hook = module._hf_hook\\n+ remove_hook_from_module(module)\\n+\\n+ hooks = [old_hook, hook]\\n+ hook = SequentialHook(*hooks)',\n", - " 'from_author': False},\n", - " {'body': 'Done.',\n", - " 'diff_hunk': '@@ -123,11 +123,21 @@ def add_hook_to_module(module: nn.Module, hook: ModelHook):\\n Args:\\n module (`torch.nn.Module`): The module to attach a hook to.\\n hook (`ModelHook`): The hook to attach.\\n+ append (`bool`): Whether, if module already contains a hook, should chain the new one with SequentialHook',\n", - " 'from_author': True},\n", - " {'body': 'Done, thanks for the great idea. ',\n", - " 'diff_hunk': '@@ -123,11 +123,21 @@ def add_hook_to_module(module: nn.Module, hook: ModelHook):\\n Args:\\n module (`torch.nn.Module`): The module to attach a hook to.\\n hook (`ModelHook`): The hook to attach.\\n+ append (`bool`): Whether, if module already contains a hook, should chain the new one with SequentialHook\\n \\n Returns:\\n `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can\\n be discarded).\\n \"\"\"\\n+\\n+ if append:\\n+ if hasattr(module, \"_hf_hook\") and (module._hf_hook is not None):',\n", - " 'from_author': True},\n", - " {'body': 'Done. 
Looks much better now.',\n", - " 'diff_hunk': '@@ -123,11 +123,21 @@ def add_hook_to_module(module: nn.Module, hook: ModelHook):\\n Args:\\n module (`torch.nn.Module`): The module to attach a hook to.\\n hook (`ModelHook`): The hook to attach.\\n+ append (`bool`): Whether, if module already contains a hook, should chain the new one with SequentialHook\\n \\n Returns:\\n `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can\\n be discarded).\\n \"\"\"\\n+\\n+ if append:\\n+ if hasattr(module, \"_hf_hook\") and (module._hf_hook is not None):\\n+ old_hook = module._hf_hook\\n+ remove_hook_from_module(module)\\n+\\n+ hooks = [old_hook, hook]\\n+ hook = SequentialHook(*hooks)',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': \"The fix is not exactly right: by doing so, the hook that ensures the input and output of the model are on the same device is now erased. In your code sample in #767, since `x` is on the CPU, `net(x)` should also be on the CPU. This is not the case with your PR. The solution would be to write a util function that will:\\r\\n- just add the hook if none is present\\r\\n- extract the current hook if one is present and chain it with this hook using a `SequentialHook`.\\r\\n\\r\\nThis is slightly more advanced than the current PR, so let me know if you'd prefer for me to do it :-)\",\n", - " 'from_author': False},\n", - " {'body': \"@sgugger I would like to try if that's ok to you. \\r\\n\\r\\nWhat do you think of creating an `append_if_needed` flag on `add_hook_to_module` that does what you just said?\",\n", - " 'from_author': True},\n", - " {'body': 'That works for me, though the name of the argument could simply be `append` :-)\\r\\nThanks for diving into this!',\n", - " 'from_author': False},\n", - " {'body': '@sgugger `append` it is.', 'from_author': True},\n", - " {'body': \"@sgugger it is ready for review, I've also added the tests. \",\n", - " 'from_author': True},\n", - " {'body': \"> Very nice, thanks! Left a couple of nits, and I think you should still put the hook with io first: just tested locally and we still have the same issue of `net(x)` being on the wrong device since it runs second and the input was already moved.\\r\\n\\r\\n@sgugger I've just addressed your nits and moved the io hook to the top. Thanks for the review! I've tested it locally on the snippet of the bug report and it brings the tensor back to CPU after the inference. Thanks!\",\n", - " 'from_author': True},\n", - " {'body': \"@sgugger, there is a test step that failed due to an http error when installing a lib. I've created and empty commit to try running it again. \",\n", - " 'from_author': True}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/768'},\n", - " 1089393747: {'diff': 'diff --git a/docs/source/usage_guides/megatron_lm.mdx b/docs/source/usage_guides/megatron_lm.mdx\\nindex bc0b90701..d26ecd4b5 100644\\n--- a/docs/source/usage_guides/megatron_lm.mdx\\n+++ b/docs/source/usage_guides/megatron_lm.mdx\\n@@ -103,65 +103,9 @@ cd ..\\n \\n 4. Installing Megatron-LM\\n \\n- a. Cloning the Megatron-LM repo\\n- ```\\n- git clone https://github.com/NVIDIA/Megatron-LM.git\\n- cd Megatron-LM\\n- ```\\n-\\n- b. 
Create a file `setup.py`, paste the below code and put in the root folder\\n- ```python\\n- \"\"\"Setup for pip package.\"\"\"\\n-\\n- import os\\n- import sys\\n- import setuptools\\n-\\n- if sys.version_info < (3,):\\n- raise Exception(\"Python 2 is not supported by Megatron.\")\\n-\\n- with open(\"README.md\", \"r\") as fh:\\n- long_description = fh.read()\\n-\\n- setuptools.setup(\\n- name=\"megatron-lm\",\\n- version=\"3.0.0\",\\n- description=\"Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism.\",\\n- long_description=long_description,\\n- long_description_content_type=\"text/markdown\",\\n- # The project\\'s main homepage.\\n- url=\"https://github.com/NVIDIA/Megatron-LM\",\\n- author=\"NVIDIA INC\",\\n- maintainer=\"NVIDIA INC\",\\n- # The licence under which the project is released\\n- license=\"See https://github.com/NVIDIA/Megatron-LM/blob/master/LICENSE\",\\n- classifiers=[\\n- \"Intended Audience :: Developers\",\\n- \"Intended Audience :: Science/Research\",\\n- \"Intended Audience :: Information Technology\",\\n- # Indicate what your project relates to\\n- \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\\n- \"Topic :: Software Development :: Libraries :: Python Modules\",\\n- # Additional Setting\\n- \"Environment :: Console\",\\n- \"Natural Language :: English\",\\n- \"Operating System :: OS Independent\",\\n- ],\\n- python_requires=\">=3.6\",\\n- packages=setuptools.find_packages(),\\n- install_requires=[\"nltk\", \"six\", \"regex\", \"torch>=1.12.0\", \"pybind11\"],\\n- # Add in any packaged data.\\n- include_package_data=True,\\n- zip_safe=False,\\n- # PyPI package information.\\n- keywords=\"deep learning, Megatron, gpu, NLP, nvidia, pytorch, torch, language\",\\n- )\\n- ```\\n-\\n- c. installing via below command\\n- ```\\n- pip install \".\"\\n- ```\\n+```\\n+pip install git+https://github.com/huggingface/Megatron-LM.git\\n+```\\n \\n ## Accelerate Megatron-LM Plugin\\n \\n@@ -183,7 +127,7 @@ What is the number of micro-batches? [1]:2\\n Do you want to enable selective activation recomputation? [YES/no]: \\n Do you want to use distributed optimizer which shards optimizer state and gradients across data pralellel ranks? [YES/no]: \\n What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]: \\n-How many GPU(s) should be used for distributed training? [1]:8\\n+How many GPU(s) should be used for distributed training? [1]:4\\n Do you wish to use FP16 or BF16 (mixed precision)? 
[NO/fp16/bf16]: bf16\\n ```\\n \\n@@ -210,7 +154,7 @@ megatron_lm_config:\\n megatron_lm_use_distributed_optimizer: true\\n mixed_precision: bf16\\n num_machines: 1\\n-num_processes: 8\\n+num_processes: 4\\n rdzv_backend: static\\n same_network: true\\n use_cpu: false\\n@@ -289,8 +233,8 @@ examples/by_feature/megatron_lm_gpt_pretraining.py \\\\\\n --dataset_config_name wikitext-2-raw-v1 \\\\\\n --block_size 1024 \\\\\\n --learning_rate 5e-5 \\\\\\n---per_device_train_batch_size 4 \\\\\\n---per_device_eval_batch_size 4 \\\\\\n+--per_device_train_batch_size 24 \\\\\\n+--per_device_eval_batch_size 24 \\\\\\n --num_train_epochs 5 \\\\\\n --with_tracking \\\\\\n --report_to \"wandb\" \\\\\\ndiff --git a/examples/by_feature/megatron_lm_gpt_pretraining.py b/examples/by_feature/megatron_lm_gpt_pretraining.py\\nindex 68a425e4d..3fcf10207 100644\\n--- a/examples/by_feature/megatron_lm_gpt_pretraining.py\\n+++ b/examples/by_feature/megatron_lm_gpt_pretraining.py\\n@@ -672,8 +672,10 @@ def group_texts(examples):\\n output_dir = os.path.join(args.output_dir, output_dir)\\n accelerator.save_state(output_dir)\\n \\n- if args.with_tracking:\\n- accelerator.end_training()\\n+ # this is causing some issue with Megatron-LM when using `wandb` at the end of the main function.\\n+ # Everything works fine inspite of commenting this out. (wandb finishes/closes the run without error)\\n+ # if args.with_tracking:\\n+ # accelerator.end_training()\\n \\n if args.output_dir is not None:\\n accelerator.wait_for_everyone()\\n',\n", - " 'code_comments': [{'body': \"Maybe add some comment here on why it's not working for now and plans to fix in the future?\",\n", - " 'diff_hunk': '@@ -672,8 +672,8 @@ def group_texts(examples):\\n output_dir = os.path.join(args.output_dir, output_dir)\\n accelerator.save_state(output_dir)\\n \\n- if args.with_tracking:\\n- accelerator.end_training()\\n+ # if args.with_tracking:\\n+ # accelerator.end_training()',\n", - " 'from_author': False},\n", - " {'body': 'Done.',\n", - " 'diff_hunk': '@@ -672,8 +672,8 @@ def group_texts(examples):\\n output_dir = os.path.join(args.output_dir, output_dir)\\n accelerator.save_state(output_dir)\\n \\n- if args.with_tracking:\\n- accelerator.end_training()\\n+ # if args.with_tracking:\\n+ # accelerator.end_training()',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'Hello @sgugger, wrt maintaining fork, sure, I can do that. 
This would help in making sure the functionality works with accelerate and we can extend it for more features (main repo is barely active).',\n", - " 'from_author': True}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/766'},\n", - " 1087746985: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex e1282c0bb..bf17f57ce 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -777,10 +777,11 @@ def prepare_model(self, model: torch.nn.Module, device_placement=None):\\n if device_placement:\\n model = model.to(self.device)\\n if self.distributed_type == DistributedType.MULTI_GPU:\\n- kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\\n- model = torch.nn.parallel.DistributedDataParallel(\\n- model, device_ids=[self.local_process_index], output_device=self.local_process_index, **kwargs\\n- )\\n+ if any(p.requires_grad for p in model.parameters()):\\n+ kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\\n+ model = torch.nn.parallel.DistributedDataParallel(\\n+ model, device_ids=[self.local_process_index], output_device=self.local_process_index, **kwargs\\n+ )\\n elif self.distributed_type == DistributedType.FSDP:\\n from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP\\n \\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/761'},\n", - " 1087176410: {'diff': 'diff --git a/README.md b/README.md\\nindex deddf2986..7b912dd80 100644\\n--- a/README.md\\n+++ b/README.md\\n@@ -243,6 +243,7 @@ pip install accelerate\\n - FP16 with native AMP (apex on the roadmap)\\n - DeepSpeed support (Experimental)\\n - PyTorch Fully Sharded Data Parallel (FSDP) support (Experimental)\\n+- Megatron-LM support (Experimental)\\n \\n ## Citing 🤗 Accelerate\\n \\ndiff --git a/docs/source/usage_guides/megatron_lm.mdx b/docs/source/usage_guides/megatron_lm.mdx\\nindex 188dddb32..bc0b90701 100644\\n--- a/docs/source/usage_guides/megatron_lm.mdx\\n+++ b/docs/source/usage_guides/megatron_lm.mdx\\n@@ -23,18 +23,45 @@ For detailed information and how things work behind the scene please refer the g\\n Accelerate integrates following feature of Megatron-LM to enable large scale pre-training/finetuning\\n of BERT (Encoder), GPT (Decoder) or T5 models (Encoder and Decoder):\\n \\n-a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks\\n+a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks.\\n+Each tensor is split into multiple chunks with each shard residing on separate GPU. At each step, the same mini-batch of data is processed\\n+independently and in parallel by each shard followed by syncing across all GPUs (`all-reduce` operation). \\n+In a simple transformer layer, this leads to 2 `all-reduces` in the forward path and 2 in the backward path.\\n+For more details, please refer research paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using\\n+Model Parallelism](https://arxiv.org/pdf/1909.08053.pdf) and \\n+this section of 🤗 blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#tensor-parallelism).\\n+\\n \\n b. 
**Pipeline Parallelism (PP)**: Reduces memory footprint and enables large scale training via inter-node parallelization. \\n Reduces the bubble of naive PP via PipeDream-Flush schedule/1F1B schedule and Interleaved 1F1B schedule. \\n-Layers are distributed uniformly across PP stages.\\n+Layers are distributed uniformly across PP stages. For example, if a model has `24` layers and we have `4` GPUs for\\n+pipeline parallelism, each GPU will have `6` layers (24/4). For more details on schedules to reduce the idle time of PP,\\n+please refer to the research paper [Efficient Large-Scale Language Model Training on GPU Clusters\\n+Using Megatron-LM](https://arxiv.org/pdf/2104.04473.pdf) and \\n+this section of 🤗 blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#pipeline-parallelism).\\n \\n c. **Sequence Parallelism (SP)**: Reduces memory footprint without any additional communication. Only applicable when using TP.\\n-\\n-d. **Data Parallelism (DP)** via Distributed Optimizer: Reduces memory footprint by sharding optimizer states and gradients across DP ranks.\\n+It reduces activation memory required as it prevents the same copies to be on the tensor parallel ranks \\n+post `all-reduce` by replacing then with `reduce-scatter` and `no-op` operation would be replaced by `all-gather`. \\n+As `all-reduce = reduce-scatter + all-gather`, this saves a ton of activation memory at no added communication cost. \\n+To put it simply, it shards the outputs of each transformer layer along sequence dimension, e.g., \\n+if the sequence length is `1024` and the TP size is `4`, each GPU will have `256` tokens (1024/4) for each sample. \\n+This increases the batch size that can be supported for training. For more details, please refer to the research paper\\n+[Reducing Activation Recomputation in Large Transformer Models](https://arxiv.org/pdf/2205.05198.pdf). \\n+\\n+d. **Data Parallelism (DP)** via Distributed Optimizer: Reduces the memory footprint by sharding optimizer states and gradients across DP ranks\\n+(versus the traditional method of replicating the optimizer state across data parallel ranks). \\n+For example, when using Adam optimizer with mixed-precision training, each parameter accounts for 12 bytes of memory.\\n+This gets distributed equally across the GPUs, i.e., each parameter would account for 3 bytes (12/4) if we have 4 GPUs.\\n+For more details, please refer the research paper [ZeRO: Memory Optimizations Toward Training Trillion\\n+Parameter Models](https://arxiv.org/pdf/1910.02054.pdf) and following section of 🤗 blog \\n+[The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#zero-data-parallelism).\\n \\n e. **Selective Activation Recomputation**: Reduces the memory footprint of activations significantly via smart activation checkpointing.\\n It doesn\\'t store activations occupying large memory while being fast to recompute thereby achieving great tradeoff between memory and recomputation.\\n+For example, for GPT-3, this leads to 70% reduction in required memory for activations at the expense of\\n+only 2.7% FLOPs overhead for recomputation of activations. For more details, please refer to the research paper \\n+[Reducing Activation Recomputation in Large Transformer Models](https://arxiv.org/pdf/2205.05198.pdf).\\n \\n f. 
**Fused Kernels**: Fused Softmax, Mixed Precision Fused Layer Norm and Fused gradient accumulation to weight gradient computation of linear layer.\\n PyTorch JIT compiled Fused GeLU and Fused Bias+Dropout+Residual addition.\\ndiff --git a/docs/source/usage_guides/training_zoo.mdx b/docs/source/usage_guides/training_zoo.mdx\\nindex cc388acac..444784b49 100644\\n--- a/docs/source/usage_guides/training_zoo.mdx\\n+++ b/docs/source/usage_guides/training_zoo.mdx\\n@@ -36,6 +36,7 @@ These examples showcase specific features that the Accelerate framework offers\\n - [Memory-aware batch size finder](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/memory.py)\\n - [Metric Computation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/multi_process_metrics.py)\\n - [Using Trackers](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/tracking.py)\\n+- [Using Megatron-LM](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/megatron_lm_gpt_pretraining.py)\\n \\n ### Full Examples \\n \\n@@ -55,6 +56,8 @@ These examples showcase every feature in Accelerate at once that was shown in \"F\\n - [Named entity recognition fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/token-classification/run_ner_no_trainer.py)\\n - [Image classification fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/image-classification/run_image_classification_no_trainer.py)\\n - [Summarization fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/summarization/run_summarization_no_trainer.py)\\n+- [End-to-end examples on how to use AWS SageMaker integration of Accelerate](https://github.com/huggingface/notebooks/blob/main/sagemaker/22_accelerate_sagemaker_examples/README.md)\\n+- [Megatron-LM examples for various NLp tasks](https://github.com/pacman100/accelerate-megatron-test) \\n \\n ## Integration Examples \\n \\n',\n", - " 'code_comments': [{'body': '```suggestion\\r\\nEach tensor is split into multiple chunks with each shard residing on separate GPU. At each step, the same mini-batch of data is processed\\r\\n```',\n", - " 'diff_hunk': '@@ -23,18 +23,45 @@ For detailed information and how things work behind the scene please refer the g\\n Accelerate integrates following feature of Megatron-LM to enable large scale pre-training/finetuning\\n of BERT (Encoder), GPT (Decoder) or T5 models (Encoder and Decoder):\\n \\n-a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks\\n+a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks.\\n+Each tensor are split into multiple chunks with each shard residing on separate GPU. At each step, same minimbatch of data is processed',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\nIt reduces activation memory required as it prevents the same copies to be on the tensor parallel ranks \\r\\n```',\n", - " 'diff_hunk': '@@ -23,18 +23,45 @@ For detailed information and how things work behind the scene please refer the g\\n Accelerate integrates following feature of Megatron-LM to enable large scale pre-training/finetuning\\n of BERT (Encoder), GPT (Decoder) or T5 models (Encoder and Decoder):\\n \\n-a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks\\n+a. 
**Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks.\\n+Each tensor are split into multiple chunks with each shard residing on separate GPU. At each step, same minimbatch of data is processed\\n+independently and in parallel by each shard followed by syncing across all GPUs (`all-reduce` operation). \\n+In a simple transformer layer, this leads to 2 `all-reduces` in the forward path and 2 in the backward path.\\n+For more details, please refer research paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using\\n+Model Parallelism](https://arxiv.org/pdf/1909.08053.pdf) and \\n+this section of 🤗 blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#tensor-parallelism).\\n+\\n \\n b. **Pipeline Parallelism (PP)**: Reduces memory footprint and enables large scale training via inter-node parallelization. \\n Reduces the bubble of naive PP via PipeDream-Flush schedule/1F1B schedule and Interleaved 1F1B schedule. \\n-Layers are distributed uniformly across PP stages.\\n+Layers are distributed uniformly across PP stages. For example, if a model has `24` layers and we have `4` GPUs for\\n+pipeline parallelism, each GPU will have `6` layers (24/4). For more details on schedules to reduce the idle time of PP,\\n+please refer to the research paper [Efficient Large-Scale Language Model Training on GPU Clusters\\n+Using Megatron-LM](https://arxiv.org/pdf/2104.04473.pdf) and \\n+this section of 🤗 blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#pipeline-parallelism).\\n \\n c. **Sequence Parallelism (SP)**: Reduces memory footprint without any additional communication. Only applicable when using TP.\\n-\\n-d. **Data Parallelism (DP)** via Distributed Optimizer: Reduces memory footprint by sharding optimizer states and gradients across DP ranks.\\n+It reduced activation memory required as it prevents same copies to be on the tensor parallel ranks ',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\npost `all-reduce` by replacing then with `reduce-scatter` and `no-op` operation would be replaced by `all-gather`. \\r\\n```',\n", - " 'diff_hunk': '@@ -23,18 +23,45 @@ For detailed information and how things work behind the scene please refer the g\\n Accelerate integrates following feature of Megatron-LM to enable large scale pre-training/finetuning\\n of BERT (Encoder), GPT (Decoder) or T5 models (Encoder and Decoder):\\n \\n-a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks\\n+a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks.\\n+Each tensor are split into multiple chunks with each shard residing on separate GPU. At each step, same minimbatch of data is processed\\n+independently and in parallel by each shard followed by syncing across all GPUs (`all-reduce` operation). \\n+In a simple transformer layer, this leads to 2 `all-reduces` in the forward path and 2 in the backward path.\\n+For more details, please refer research paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using\\n+Model Parallelism](https://arxiv.org/pdf/1909.08053.pdf) and \\n+this section of 🤗 blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#tensor-parallelism).\\n+\\n \\n b. 
**Pipeline Parallelism (PP)**: Reduces memory footprint and enables large scale training via inter-node parallelization. \\n Reduces the bubble of naive PP via PipeDream-Flush schedule/1F1B schedule and Interleaved 1F1B schedule. \\n-Layers are distributed uniformly across PP stages.\\n+Layers are distributed uniformly across PP stages. For example, if a model has `24` layers and we have `4` GPUs for\\n+pipeline parallelism, each GPU will have `6` layers (24/4). For more details on schedules to reduce the idle time of PP,\\n+please refer to the research paper [Efficient Large-Scale Language Model Training on GPU Clusters\\n+Using Megatron-LM](https://arxiv.org/pdf/2104.04473.pdf) and \\n+this section of 🤗 blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#pipeline-parallelism).\\n \\n c. **Sequence Parallelism (SP)**: Reduces memory footprint without any additional communication. Only applicable when using TP.\\n-\\n-d. **Data Parallelism (DP)** via Distributed Optimizer: Reduces memory footprint by sharding optimizer states and gradients across DP ranks.\\n+It reduced activation memory required as it prevents same copies to be on the tensor parallel ranks \\n+post `all-reduce` by replacing it with `reduce-scatter` and `no-op` operation would be replaced by `all-gather`. ',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\nd. **Data Parallelism (DP)** via Distributed Optimizer: Reduces the memory footprint by sharding optimizer states and gradients across DP ranks\\r\\n```',\n", - " 'diff_hunk': '@@ -23,18 +23,45 @@ For detailed information and how things work behind the scene please refer the g\\n Accelerate integrates following feature of Megatron-LM to enable large scale pre-training/finetuning\\n of BERT (Encoder), GPT (Decoder) or T5 models (Encoder and Decoder):\\n \\n-a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks\\n+a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks.\\n+Each tensor are split into multiple chunks with each shard residing on separate GPU. At each step, same minimbatch of data is processed\\n+independently and in parallel by each shard followed by syncing across all GPUs (`all-reduce` operation). \\n+In a simple transformer layer, this leads to 2 `all-reduces` in the forward path and 2 in the backward path.\\n+For more details, please refer research paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using\\n+Model Parallelism](https://arxiv.org/pdf/1909.08053.pdf) and \\n+this section of 🤗 blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#tensor-parallelism).\\n+\\n \\n b. **Pipeline Parallelism (PP)**: Reduces memory footprint and enables large scale training via inter-node parallelization. \\n Reduces the bubble of naive PP via PipeDream-Flush schedule/1F1B schedule and Interleaved 1F1B schedule. \\n-Layers are distributed uniformly across PP stages.\\n+Layers are distributed uniformly across PP stages. For example, if a model has `24` layers and we have `4` GPUs for\\n+pipeline parallelism, each GPU will have `6` layers (24/4). 
For more details on schedules to reduce the idle time of PP,\\n+please refer to the research paper [Efficient Large-Scale Language Model Training on GPU Clusters\\n+Using Megatron-LM](https://arxiv.org/pdf/2104.04473.pdf) and \\n+this section of 🤗 blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#pipeline-parallelism).\\n \\n c. **Sequence Parallelism (SP)**: Reduces memory footprint without any additional communication. Only applicable when using TP.\\n-\\n-d. **Data Parallelism (DP)** via Distributed Optimizer: Reduces memory footprint by sharding optimizer states and gradients across DP ranks.\\n+It reduced activation memory required as it prevents same copies to be on the tensor parallel ranks \\n+post `all-reduce` by replacing it with `reduce-scatter` and `no-op` operation would be replaced by `all-gather`. \\n+As `all-reduce = reduce-scatter + all-gather`, this saves a ton of activation memory at no added communication cost. \\n+To put it simply, it shards the outputs of each transformer layer along sequence dimension, e.g., \\n+if the sequence length is `1024` and the TP size is `4`, each GPU will have `256` tokens (1024/4) for each sample. \\n+This increases the batch size that can be supported for training. For more details, please refer to the research paper\\n+[Reducing Activation Recomputation in Large Transformer Models](https://arxiv.org/pdf/2205.05198.pdf). \\n+\\n+d. **Data Parallelism (DP)** via Distributed Optimizer: Reduces memory footprint by sharding optimizer states and gradients across DP ranks',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/759'},\n", - " 1087066669: {'diff': 'diff --git a/src/accelerate/utils/constants.py b/src/accelerate/utils/constants.py\\nindex 934923bef..f31027ee1 100644\\n--- a/src/accelerate/utils/constants.py\\n+++ b/src/accelerate/utils/constants.py\\n@@ -58,3 +58,5 @@\\n \"master_addr\",\\n \"master_port\",\\n ]\\n+\\n+CUDA_DISTRIBUTED_TYPES = [\"DEEPSPEED\", \"MULTI_GPU\", \"FSDP\", \"MEGATRON_LM\"]\\ndiff --git a/src/accelerate/utils/operations.py b/src/accelerate/utils/operations.py\\nindex aef1fd589..7ba4482cc 100644\\n--- a/src/accelerate/utils/operations.py\\n+++ b/src/accelerate/utils/operations.py\\n@@ -24,6 +24,7 @@\\n from torch.distributed import ReduceOp\\n \\n from ..state import AcceleratorState\\n+from .constants import CUDA_DISTRIBUTED_TYPES\\n from .dataclasses import DistributedType, TensorInformation\\n from .imports import is_tpu_available\\n from .versions import is_torch_version\\n@@ -220,12 +221,7 @@ def gather(tensor):\\n \"\"\"\\n if AcceleratorState().distributed_type == DistributedType.TPU:\\n return _tpu_gather(tensor, name=\"accelerate.utils.gather\")\\n- elif AcceleratorState().distributed_type in [\\n- DistributedType.DEEPSPEED,\\n- DistributedType.MULTI_GPU,\\n- DistributedType.FSDP,\\n- DistributedType.MEGATRON_LM,\\n- ]:\\n+ elif AcceleratorState().distributed_type in CUDA_DISTRIBUTED_TYPES:\\n return _gpu_gather(tensor)\\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\\n return _cpu_gather(tensor)\\n@@ -258,12 +254,7 @@ def gather_object(object: Any):\\n \"\"\"\\n if AcceleratorState().distributed_type == DistributedType.TPU:\\n raise NotImplementedError(\"gather objects in TPU is not supported\")\\n- elif 
AcceleratorState().distributed_type in [\\n- DistributedType.DEEPSPEED,\\n- DistributedType.MULTI_GPU,\\n- DistributedType.FSDP,\\n- DistributedType.MEGATRON_LM,\\n- ]:\\n+ elif AcceleratorState().distributed_type in CUDA_DISTRIBUTED_TYPES:\\n return _gpu_gather_object(object)\\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\\n return _cpu_gather_object(object)\\n@@ -302,12 +293,7 @@ def broadcast(tensor, from_process: int = 0):\\n \"\"\"\\n if AcceleratorState().distributed_type == DistributedType.TPU:\\n return _tpu_broadcast(tensor, src=from_process, name=\"accelerate.utils.broadcast\")\\n- elif AcceleratorState().distributed_type in [\\n- DistributedType.DEEPSPEED,\\n- DistributedType.MULTI_GPU,\\n- DistributedType.FSDP,\\n- DistributedType.MEGATRON_LM,\\n- ]:\\n+ elif AcceleratorState().distributed_type in CUDA_DISTRIBUTED_TYPES:\\n return _gpu_broadcast(tensor, src=from_process)\\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\\n return _gpu_broadcast(tensor, src=from_process)\\n@@ -331,12 +317,7 @@ def broadcast_object_list(object_list, from_process: int = 0):\\n if AcceleratorState().distributed_type == DistributedType.TPU:\\n for i, obj in enumerate(object_list):\\n object_list[i] = xm.mesh_reduce(\"accelerate.utils.broadcast_object_list\", obj, lambda x: x[from_process])\\n- elif AcceleratorState().distributed_type in [\\n- DistributedType.DEEPSPEED,\\n- DistributedType.MULTI_GPU,\\n- DistributedType.FSDP,\\n- DistributedType.MEGATRON_LM,\\n- ]:\\n+ elif AcceleratorState().distributed_type in CUDA_DISTRIBUTED_TYPES:\\n torch.distributed.broadcast_object_list(object_list, src=from_process)\\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\\n torch.distributed.broadcast_object_list(object_list, src=from_process)\\n@@ -452,12 +433,7 @@ def _reduce_across_processes(tensor, reduction=\"mean\"):\\n if state.distributed_type == DistributedType.TPU:\\n xm.all_reduce(\"sum\", cloned_tensor)\\n return cloned_tensor\\n- elif state.distributed_type in [\\n- DistributedType.DEEPSPEED,\\n- DistributedType.MULTI_GPU,\\n- DistributedType.FSDP,\\n- DistributedType.MEGATRON_LM,\\n- ]:\\n+ elif state.distributed_type.value in CUDA_DISTRIBUTED_TYPES:\\n torch.distributed.all_reduce(cloned_tensor, ReduceOp.SUM)\\n return cloned_tensor\\n else:\\ndiff --git a/src/accelerate/utils/random.py b/src/accelerate/utils/random.py\\nindex 8dc149a02..01c4df2af 100644\\n--- a/src/accelerate/utils/random.py\\n+++ b/src/accelerate/utils/random.py\\n@@ -19,6 +19,7 @@\\n import torch\\n \\n from ..state import AcceleratorState\\n+from .constants import CUDA_DISTRIBUTED_TYPES\\n from .dataclasses import DistributedType, RNGType\\n from .imports import is_tpu_available\\n \\n@@ -64,12 +65,7 @@ def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optiona\\n state = AcceleratorState()\\n if state.distributed_type == DistributedType.TPU:\\n rng_state = xm.mesh_reduce(\"random_seed\", rng_state, lambda x: x[0])\\n- elif state.distributed_type in [\\n- DistributedType.DEEPSPEED,\\n- DistributedType.MULTI_GPU,\\n- DistributedType.FSDP,\\n- DistributedType.MEGATRON_LM,\\n- ]:\\n+ elif state.distributed_type in CUDA_DISTRIBUTED_TYPES:\\n rng_state = rng_state.to(state.device)\\n torch.distributed.broadcast(rng_state, 0)\\n rng_state = rng_state.cpu()\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 
'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/758'},\n", - " 1086805522: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex e40797b39..e1282c0bb 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -1194,6 +1194,9 @@ def clip_grad_norm_(self, parameters, max_norm, norm_type=2):\\n \"\"\"\\n Should be used in place of `torch.nn.utils.clip_grad_norm_`.\\n \\n+ Returns:\\n+ `torch.Tensor`: Total norm of the parameter gradients (viewed as a single vector).\\n+\\n Example:\\n \\n ```python\\n@@ -1217,13 +1220,13 @@ def clip_grad_norm_(self, parameters, max_norm, norm_type=2):\\n parameters = [p for p in parameters]\\n for model in self._models:\\n if parameters == [p for p in model.parameters()]:\\n- model.clip_grad_norm_(max_norm, norm_type)\\n- return\\n+ return model.clip_grad_norm_(max_norm, norm_type)\\n elif self.distributed_type == DistributedType.DEEPSPEED:\\n # `accelerator.backward(loss)` is doing that automatically. Therefore, it\\'s implementation is not needed\\n- return\\n+ # We cannot return the gradient norm because DeepSpeed does it.\\n+ return None\\n self.unscale_gradients()\\n- torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type)\\n+ return torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type)\\n \\n def clip_grad_value_(self, parameters, clip_value):\\n \"\"\"\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/756'},\n", - " 1086523048: {'diff': 'diff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\\nindex 58009d03b..bdb53988a 100644\\n--- a/src/accelerate/utils/imports.py\\n+++ b/src/accelerate/utils/imports.py\\n@@ -18,7 +18,9 @@\\n \\n import torch\\n \\n-from .versions import is_torch_version\\n+from packaging.version import parse\\n+\\n+from .versions import compare_versions, is_torch_version\\n \\n \\n # The package importlib_metadata is in a different place, depending on the Python version.\\n@@ -88,7 +90,11 @@ def is_bf16_available(ignore_tpu=False):\\n \\n \\n def is_megatron_lm_available():\\n- return importlib.util.find_spec(\"megatron\") is not None\\n+ package_exists = importlib.util.find_spec(\"megatron\") is not None\\n+ if package_exists:\\n+ megatron_version = parse(importlib_metadata.version(\"megatron-lm\"))\\n+ return compare_versions(megatron_version, \">=\", \"2.2.0\")\\n+ return False\\n \\n \\n def is_transformers_available():\\n',\n", - " 'code_comments': [{'body': 'Redundant `else` :)',\n", - " 'diff_hunk': '@@ -88,7 +90,12 @@ def is_bf16_available(ignore_tpu=False):\\n \\n \\n def is_megatron_lm_available():\\n- return importlib.util.find_spec(\"megatron\") is not None\\n+ package_exists = importlib.util.find_spec(\"megatron\") is not None\\n+ if package_exists:\\n+ megatron_version = parse(importlib_metadata.version(\"megatron-lm\"))\\n+ return compare_versions(megatron_version, \">=\", \"2.2.0\")\\n+ else:',\n", - " 'from_author': False},\n", - " {'body': 'Good point, thanks @tjruwase :)',\n", - " 'diff_hunk': '@@ -88,7 +90,12 @@ def is_bf16_available(ignore_tpu=False):\\n \\n \\n def is_megatron_lm_available():\\n- return importlib.util.find_spec(\"megatron\") is not None\\n+ package_exists = importlib.util.find_spec(\"megatron\") is not None\\n+ if package_exists:\\n+ megatron_version = 
parse(importlib_metadata.version(\"megatron-lm\"))\\n+ return compare_versions(megatron_version, \">=\", \"2.2.0\")\\n+ else:',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'Thank you for the fix ✨. LGTM!', 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/755'},\n", - " 1083848321: {'diff': 'diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex f348bb0a6..3e5eff45e 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -878,15 +878,17 @@ def launch_command(args):\\n args.use_cpu = args.cpu\\n \\n if args.num_cpu_threads_per_process is None:\\n- local_size = get_int_from_env(\\n- [\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1\\n- )\\n- args.num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size)\\n- if args.num_cpu_threads_per_process == 0:\\n- args.num_cpu_threads_per_process = 1\\n- warned.append(\\n- f\"\\\\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance\"\\n- )\\n+ args.num_cpu_threads_per_process = 1\\n+ if args.use_cpu and args.num_processes > 1:\\n+ local_size = get_int_from_env(\\n+ [\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1\\n+ )\\n+ threads_per_process = int(psutil.cpu_count(logical=False) / local_size)\\n+ if args.num_cpu_threads_per_process > 1:\\n+ args.num_cpu_threads_per_process = threads_per_process\\n+ warned.append(\\n+ f\"\\\\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance when training on CPUs\"\\n+ )\\n \\n if any(warned):\\n message = \"The following values were not passed to `accelerate launch` and had defaults used instead:\\\\n\"\\n',\n", - " 'code_comments': [{'body': \"This means a chosen value of 1 is erased. 
Let's leave the default at `None` and change it at 1 when not in distributed CPU, or the other default when in distributed CPU.\",\n", - " 'diff_hunk': '@@ -877,15 +877,15 @@ def launch_command(args):\\n if not hasattr(args, \"use_cpu\"):\\n args.use_cpu = args.cpu\\n \\n- if args.num_cpu_threads_per_process is None:\\n+ if args.num_cpu_threads_per_process == 1 and args.use_cpu:',\n", - " 'from_author': False},\n", - " {'body': 'Adjusted :) ',\n", - " 'diff_hunk': '@@ -877,15 +877,15 @@ def launch_command(args):\\n if not hasattr(args, \"use_cpu\"):\\n args.use_cpu = args.cpu\\n \\n- if args.num_cpu_threads_per_process is None:\\n+ if args.num_cpu_threads_per_process == 1 and args.use_cpu:',\n", - " 'from_author': True},\n", - " {'body': \"```suggestion\\r\\n if args.num_cpu_threads_per_process is None:\\r\\n```\\r\\nLet's not rely on Python bool conversion magic please :-)\",\n", - " 'diff_hunk': '@@ -877,16 +877,18 @@ def launch_command(args):\\n if not hasattr(args, \"use_cpu\"):\\n args.use_cpu = args.cpu\\n \\n- if args.num_cpu_threads_per_process is None:\\n- local_size = get_int_from_env(\\n- [\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1\\n- )\\n- args.num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size)\\n- if args.num_cpu_threads_per_process == 0:\\n- args.num_cpu_threads_per_process = 1\\n- warned.append(\\n- f\"\\\\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance\"\\n- )\\n+ if not args.num_cpu_threads_per_process:',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/753'},\n", - " 1083718310: {'diff': 'diff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\\nindex 43faf45ca..716d5f602 100644\\n--- a/src/accelerate/commands/config/config_args.py\\n+++ b/src/accelerate/commands/config/config_args.py\\n@@ -165,6 +165,7 @@ class SageMakerConfig(BaseConfig):\\n profile: Optional[str] = None\\n region: str = \"us-east-1\"\\n num_machines: int = 1\\n+ gpu_ids: str = \"all\"\\n base_job_name: str = f\"accelerate-sagemaker-{num_machines}\"\\n pytorch_version: str = SAGEMAKER_PYTORCH_VERSION\\n transformers_version: str = SAGEMAKER_TRANSFORMERS_VERSION\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/751'},\n", - " 1081649357: {'diff': 'diff --git a/docs/source/index.mdx b/docs/source/index.mdx\\nindex 1664fd7a2..a87bd6e32 100644\\n--- a/docs/source/index.mdx\\n+++ b/docs/source/index.mdx\\n@@ -51,19 +51,19 @@ accelerate launch {my_script.py}\\n \\n
\\n
\\n-
Tutorials
\\n

Learn the basics and become familiar with using 🤗 Accelerate. Start here if you are using 🤗 Accelerate for the first time!

\\n
\\n-
How-to guides
\\n

Practical guides to help you achieve a specific goal. Take a look at these guides to learn how to use 🤗 Accelerate to solve real-world problems.

\\n
\\n-
Conceptual guides
\\n

High-level explanations for building a better understanding of important topics such as avoiding subtle nuances and pitfalls in distributed training and DeepSpeed.

\\n
\\n-
Reference
\\n

Technical descriptions of how 🤗 Accelerate classes and methods work.

\\n
\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/749'},\n", - " 1080567797: {'diff': 'diff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\\nindex 0b57ae6d8..08ee3a65a 100644\\n--- a/src/accelerate/utils/modeling.py\\n+++ b/src/accelerate/utils/modeling.py\\n@@ -258,7 +258,7 @@ def get_max_layer_size(\\n modules_to_treat = modules.copy()\\n while len(modules_to_treat) > 0:\\n module_name, module = modules_to_treat.pop(0)\\n- modules_children = list(module.named_children())\\n+ modules_children = list(module.named_children()) if isinstance(module, torch.nn.Module) else []\\n if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:\\n # No splitting this one so we compare to the max_size\\n size = module_sizes[module_name]\\n',\n", - " 'code_comments': [{'body': '```suggestion\\r\\n modules_children = list(module.named_children()) if isinstance(module, torch.nn.Module) else []\\r\\n```',\n", - " 'diff_hunk': '@@ -258,7 +258,7 @@ def get_max_layer_size(\\n modules_to_treat = modules.copy()\\n while len(modules_to_treat) > 0:\\n module_name, module = modules_to_treat.pop(0)\\n- modules_children = list(module.named_children())\\n+ modules_children = list(module.named_children()) if not isinstance(module, torch.nn.Parameter) else []',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': '> What\\'s a reproducer? I\\'m not sure how you get to a parameter here since we only apply the `children` method.\\r\\n\\r\\nHaha that was fast. Here a repro:\\r\\n\\r\\n```py\\r\\nfrom diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker\\r\\n\\r\\nStableDiffusionSafetyChecker._no_split_modules = [\"CLIPEncoderLayer\"]\\r\\npipe = StableDiffusionSafetyChecker.from_pretrained(\\'CompVis/stable-diffusion-safety-checker\\', device_map=\\'auto\\')\\r\\n```\\r\\n\\r\\n(sorry, it\\'s 1GB of download)',\n", - " 'from_author': True},\n", - " {'body': 'Which branch should I check? On main I just get an error telling me device_map auto is not supported yet',\n", - " 'from_author': False},\n", - " {'body': 'Ah sorry just updated it above, can you try:\\r\\n\\r\\n```python\\r\\nfrom diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker\\r\\n\\r\\nStableDiffusionSafetyChecker._no_split_modules = [\"CLIPEncoderLayer\"]\\r\\npipe = StableDiffusionSafetyChecker.from_pretrained(\\'CompVis/stable-diffusion-safety-checker\\', device_map=\\'auto\\')\\r\\n```',\n", - " 'from_author': True},\n", - " {'body': 'Awesome! Can I merge after one :heavy_check_mark: here or wait for another reviewer? 
',\n", - " 'from_author': True},\n", - " {'body': 'Nope, you can go ahead and merge!', 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/747'},\n", - " 1080453143: {'diff': 'diff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\\nindex 01c419f4f..e86a0abff 100644\\n--- a/src/accelerate/commands/config/cluster.py\\n+++ b/src/accelerate/commands/config/cluster.py\\n@@ -35,6 +35,7 @@ def get_cluster_input():\\n \\n machine_rank = 0\\n num_machines = 1\\n+ num_processes = 1\\n gpu_ids = None\\n main_process_ip = None\\n main_process_port = None\\n@@ -294,12 +295,6 @@ def get_cluster_input():\\n default=1,\\n error_message=\"Please enter an integer.\",\\n )\\n-\\n- if distributed_type in [DistributedType.MULTI_GPU, DistributedType.NO] and not use_cpu:\\n- gpu_ids = _ask_field(\\n- \"What GPU(s) (by id) should be used for training on this machine as a comma-seperated list? [all]:\",\\n- default=\"all\",\\n- )\\n elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED]:\\n num_processes = _ask_field(\\n \"How many GPU(s) should be used for distributed training? [1]:\",\\n@@ -310,6 +305,12 @@ def get_cluster_input():\\n else:\\n num_processes = 1\\n \\n+ if distributed_type in [DistributedType.MULTI_GPU, DistributedType.NO] and not use_cpu:\\n+ gpu_ids = _ask_field(\\n+ \"What GPU(s) (by id) should be used for training on this machine as a comma-seperated list? [all]:\",\\n+ default=\"all\",\\n+ )\\n+\\n if distributed_type != DistributedType.TPU:\\n if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config:\\n mixed_precision = \"no\"\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/746'},\n", - " 1077418429: {'diff': 'diff --git a/docs/source/usage_guides/training_zoo.mdx b/docs/source/usage_guides/training_zoo.mdx\\nindex b3fff25e3..cc388acac 100644\\n--- a/docs/source/usage_guides/training_zoo.mdx\\n+++ b/docs/source/usage_guides/training_zoo.mdx\\n@@ -27,6 +27,7 @@ These examples showcase the base features of Accelerate and are a great starting\\n \\n These examples showcase specific features that the Accelerate framework offers\\n \\n+- [Automatic memory-aware gradient accumulation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/automatic_gradient_accumulation.py)\\n - [Checkpointing states](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/checkpointing.py)\\n - [Cross validation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/cross_validation.py)\\n - [DeepSpeed](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/deepspeed_with_config_support.py)\\ndiff --git a/examples/by_feature/automatic_gradient_accumulation.py b/examples/by_feature/automatic_gradient_accumulation.py\\nnew file mode 100644\\nindex 000000000..d6e0cf028\\n--- /dev/null\\n+++ b/examples/by_feature/automatic_gradient_accumulation.py\\n@@ -0,0 +1,232 @@\\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+import argparse\\n+import os\\n+\\n+import torch\\n+from torch.optim import AdamW\\n+from torch.utils.data import DataLoader\\n+\\n+# New Code #\\n+import evaluate\\n+from accelerate import Accelerator, DistributedType\\n+from accelerate.utils import find_executable_batch_size\\n+from datasets import load_dataset\\n+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\\n+\\n+\\n+########################################################################\\n+# This is a fully working simple example to use Accelerate,\\n+# specifically showcasing how to combine both the gradient accumulation\\n+# and automatic batch size finder utilities of Accelerate to perfrom\\n+# automatic gradient accumulation\\n+#\\n+# This example trains a Bert base model on GLUE MRPC\\n+# in any of the following settings (with the same script):\\n+# - single CPU or single GPU\\n+# - multi GPUS (using PyTorch distributed mode)\\n+# - (multi) TPUs\\n+# - fp16 (mixed-precision) or fp32 (normal precision)\\n+#\\n+# New additions from the base script can be found quickly by\\n+# looking for the # New Code # tags\\n+#\\n+# To run it in each of these various modes, follow the instructions\\n+# in the readme for examples:\\n+# https://github.com/huggingface/accelerate/tree/main/examples\\n+#\\n+########################################################################\\n+\\n+EVAL_BATCH_SIZE = 32\\n+\\n+\\n+def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):\\n+ \"\"\"\\n+ Creates a set of `DataLoader`s for the `glue` dataset,\\n+ using \"bert-base-cased\" as the tokenizer.\\n+\\n+ Args:\\n+ accelerator (`Accelerator`):\\n+ An `Accelerator` object\\n+ batch_size (`int`, *optional*):\\n+ The batch size for the train and validation DataLoaders.\\n+ \"\"\"\\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\\n+ datasets = load_dataset(\"glue\", \"mrpc\")\\n+\\n+ def tokenize_function(examples):\\n+ # max_length=None => use the model max length (it\\'s actually the default)\\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\\n+ return outputs\\n+\\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\\n+ # starting with the main process first:\\n+ with accelerator.main_process_first():\\n+ tokenized_datasets = datasets.map(\\n+ tokenize_function,\\n+ batched=True,\\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\\n+ )\\n+\\n+ # We also rename the \\'label\\' column to \\'labels\\' which is the expected name for labels by the models of the\\n+ # transformers library\\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\\n+\\n+ def collate_fn(examples):\\n+ # On TPU it\\'s best to pad everything to the same length or training will be very slow.\\n+ if accelerator.distributed_type == DistributedType.TPU:\\n+ return 
tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\\n+\\n+ # Instantiate dataloaders.\\n+ train_dataloader = DataLoader(\\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\\n+ )\\n+ eval_dataloader = DataLoader(\\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\\n+ )\\n+\\n+ return train_dataloader, eval_dataloader\\n+\\n+\\n+# For testing only\\n+if os.environ.get(\"TESTING_MOCKED_DATALOADERS\", None) == \"1\":\\n+ from accelerate.test_utils.training import mocked_dataloaders\\n+\\n+ get_dataloaders = mocked_dataloaders # noqa: F811\\n+\\n+\\n+def training_function(config, args):\\n+ # For testing only\\n+ if os.environ.get(\"TESTING_MOCKED_DATALOADERS\", None) == \"1\":\\n+ config[\"num_epochs\"] = 2\\n+ # Initialize accelerator\\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\\n+ lr = config[\"lr\"]\\n+ num_epochs = int(config[\"num_epochs\"])\\n+ seed = int(config[\"seed\"])\\n+ observed_batch_size = int(config[\"batch_size\"])\\n+\\n+ metric = evaluate.load(\"glue\", \"mrpc\")\\n+\\n+ # New Code #\\n+ # We use the `find_executable_batch_size` decorator, passing in the desired observed batch size\\n+ # to train on. If a CUDA OOM error occurs, it will retry this loop cutting the batch size in\\n+ # half each time. From this, we can calculate the number of gradient accumulation steps needed\\n+ # and modify the Accelerator object as a result\\n+ @find_executable_batch_size(starting_batch_size=int(observed_batch_size))\\n+ def inner_training_loop(batch_size):\\n+ # Since we need to modify the outside accelerator object, we need to bring it\\n+ # to the local scope\\n+ nonlocal accelerator\\n+\\n+ # We can calculate the number of gradient accumulation steps based on the current\\n+ # batch size vs the starting batch size\\n+ num_gradient_accumulation_steps = observed_batch_size // batch_size\\n+\\n+ # And then set it in the Accelerator directly:\\n+ accelerator.gradient_accumulation_steps = num_gradient_accumulation_steps\\n+\\n+ # Next we need to free all of the stored model references in the Accelerator each time\\n+ accelerator.free_memory()\\n+\\n+ # And set the seed so our results are reproducable each reset\\n+ set_seed(seed)\\n+\\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\\n+ model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\\n+\\n+ # We could avoid this line since the accelerator is set with `device_placement=True` (default value).\\n+ # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\\n+ # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\\n+ model = model.to(accelerator.device)\\n+\\n+ # Instantiate optimizer\\n+ optimizer = AdamW(params=model.parameters(), lr=lr)\\n+ train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\\n+\\n+ # Instantiate scheduler\\n+ lr_scheduler = get_linear_schedule_with_warmup(\\n+ optimizer=optimizer,\\n+ num_warmup_steps=100,\\n+ num_training_steps=(len(train_dataloader) * num_epochs),\\n+ )\\n+\\n+ # Prepare everything\\n+ # There is no specific order to 
remember, we just need to unpack the objects in the same order we gave them to the\\n+ # prepare method.\\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\\n+ )\\n+\\n+ # Now we train the model\\n+ for epoch in range(num_epochs):\\n+ model.train()\\n+ for step, batch in enumerate(train_dataloader):\\n+ # And perform gradient accumulation\\n+ with accelerator.accumulate(model):\\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\\n+ batch.to(accelerator.device)\\n+ outputs = model(**batch)\\n+ loss = outputs.loss\\n+ accelerator.backward(loss)\\n+ optimizer.step()\\n+ lr_scheduler.step()\\n+ optimizer.zero_grad()\\n+\\n+ model.eval()\\n+ for step, batch in enumerate(eval_dataloader):\\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\\n+ batch.to(accelerator.device)\\n+ with torch.no_grad():\\n+ outputs = model(**batch)\\n+ predictions = outputs.logits.argmax(dim=-1)\\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\\n+ metric.add_batch(\\n+ predictions=predictions,\\n+ references=references,\\n+ )\\n+\\n+ eval_metric = metric.compute()\\n+ # Use accelerator.print to print only on the main process.\\n+ accelerator.print(f\"epoch {epoch}:\", eval_metric)\\n+\\n+ # New Code #\\n+ # And call it at the end with no arguments\\n+ # Note: You could also refactor this outside of your training loop function\\n+ inner_training_loop()\\n+\\n+\\n+def main():\\n+ parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\\n+ parser.add_argument(\\n+ \"--mixed_precision\",\\n+ type=str,\\n+ default=\"no\",\\n+ choices=[\"no\", \"fp16\", \"bf16\"],\\n+ help=\"Whether to use mixed precision. Choose\"\\n+ \"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.\"\\n+ \"and an Nvidia Ampere GPU.\",\\n+ )\\n+ parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\\n+ args = parser.parse_args()\\n+ # New Code #\\n+ # We modify the starting batch size to be an observed batch size of 256, to guarentee an initial CUDA OOM\\n+ config = {\"lr\": 2e-5, \"num_epochs\": 3, \"seed\": 42, \"batch_size\": 256}\\n+ training_function(config, args)\\n+\\n+\\n+if __name__ == \"__main__\":\\n+ main()\\ndiff --git a/tests/test_examples.py b/tests/test_examples.py\\nindex ac8c4de52..9197070c8 100644\\n--- a/tests/test_examples.py\\n+++ b/tests/test_examples.py\\n@@ -36,6 +36,7 @@\\n \"gradient_accumulation.py\",\\n \"multi_process_metrics.py\",\\n \"memory.py\",\\n+ \"automatic_gradient_accumulation.py\",\\n \"fsdp_with_peak_mem_tracking.py\",\\n \"deepspeed_with_config_support.py\",\\n ]\\n',\n", - " 'code_comments': [{'body': '```suggestion\\r\\n # half each time. From this, we can calculate the number of gradient accumulation steps needed\\r\\n```',\n", - " 'diff_hunk': '@@ -0,0 +1,232 @@\\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+import argparse\\n+import os\\n+\\n+import torch\\n+from torch.optim import AdamW\\n+from torch.utils.data import DataLoader\\n+\\n+# New Code #\\n+import evaluate\\n+from accelerate import Accelerator, DistributedType\\n+from accelerate.utils import find_executable_batch_size\\n+from datasets import load_dataset\\n+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\\n+\\n+\\n+########################################################################\\n+# This is a fully working simple example to use Accelerate,\\n+# specifically showcasing how to combine both the gradient accumulation\\n+# and automatic batch size finder utilities of Accelerate to perfrom\\n+# automatic gradient accumulation\\n+#\\n+# This example trains a Bert base model on GLUE MRPC\\n+# in any of the following settings (with the same script):\\n+# - single CPU or single GPU\\n+# - multi GPUS (using PyTorch distributed mode)\\n+# - (multi) TPUs\\n+# - fp16 (mixed-precision) or fp32 (normal precision)\\n+#\\n+# New additions from the base script can be found quickly by\\n+# looking for the # New Code # tags\\n+#\\n+# To run it in each of these various modes, follow the instructions\\n+# in the readme for examples:\\n+# https://github.com/huggingface/accelerate/tree/main/examples\\n+#\\n+########################################################################\\n+\\n+EVAL_BATCH_SIZE = 32\\n+\\n+\\n+def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):\\n+ \"\"\"\\n+ Creates a set of `DataLoader`s for the `glue` dataset,\\n+ using \"bert-base-cased\" as the tokenizer.\\n+\\n+ Args:\\n+ accelerator (`Accelerator`):\\n+ An `Accelerator` object\\n+ batch_size (`int`, *optional*):\\n+ The batch size for the train and validation DataLoaders.\\n+ \"\"\"\\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\\n+ datasets = load_dataset(\"glue\", \"mrpc\")\\n+\\n+ def tokenize_function(examples):\\n+ # max_length=None => use the model max length (it\\'s actually the default)\\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\\n+ return outputs\\n+\\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\\n+ # starting with the main process first:\\n+ with accelerator.main_process_first():\\n+ tokenized_datasets = datasets.map(\\n+ tokenize_function,\\n+ batched=True,\\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\\n+ )\\n+\\n+ # We also rename the \\'label\\' column to \\'labels\\' which is the expected name for labels by the models of the\\n+ # transformers library\\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\\n+\\n+ def collate_fn(examples):\\n+ # On TPU it\\'s best to pad everything to the same length or training will be very slow.\\n+ if accelerator.distributed_type == DistributedType.TPU:\\n+ return 
tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\\n+\\n+ # Instantiate dataloaders.\\n+ train_dataloader = DataLoader(\\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\\n+ )\\n+ eval_dataloader = DataLoader(\\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\\n+ )\\n+\\n+ return train_dataloader, eval_dataloader\\n+\\n+\\n+# For testing only\\n+if os.environ.get(\"TESTING_MOCKED_DATALOADERS\", None) == \"1\":\\n+ from accelerate.test_utils.training import mocked_dataloaders\\n+\\n+ get_dataloaders = mocked_dataloaders # noqa: F811\\n+\\n+\\n+def training_function(config, args):\\n+ # For testing only\\n+ if os.environ.get(\"TESTING_MOCKED_DATALOADERS\", None) == \"1\":\\n+ config[\"num_epochs\"] = 2\\n+ # Initialize accelerator\\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\\n+ lr = config[\"lr\"]\\n+ num_epochs = int(config[\"num_epochs\"])\\n+ seed = int(config[\"seed\"])\\n+ observed_batch_size = int(config[\"batch_size\"])\\n+\\n+ metric = evaluate.load(\"glue\", \"mrpc\")\\n+\\n+ # New Code #\\n+ # We use the `find_executable_batch_size` decorator, passing in the desired observed batch size\\n+ # to train on. If a CUDA OOM error occurs, it will retry this loop cutting the batch size in\\n+ # half each time. From this, we can calcualte the number of gradient accumulation steps needed',\n", - " 'from_author': False}],\n", - " 'context': [{'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_742). All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/742'},\n", - " 1076498274: {'diff': 'diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex 982293a33..f348bb0a6 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -45,6 +45,7 @@\\n )\\n from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS\\n from accelerate.utils.dataclasses import SageMakerDistributedType\\n+from accelerate.utils.launch import env_var_path_add\\n \\n \\n if is_rich_available():\\n@@ -573,7 +574,7 @@ def deepspeed_launcher(args):\\n warnings.warn(\\'--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.\\', DeprecationWarning)\\n mixed_precision = \"fp16\"\\n \\n- current_env[\"PYTHONPATH\"] = sys.executable\\n+ current_env[\"PYTHONPATH\"] = env_var_path_add(\"PYTHONPATH\", os.path.abspath(\".\"))\\n current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\\n current_env[\"USE_DEEPSPEED\"] = \"true\"\\n current_env[\"DEEPSPEED_ZERO_STAGE\"] = str(args.zero_stage)\\ndiff --git a/src/accelerate/utils/launch.py b/src/accelerate/utils/launch.py\\nindex 31d71ad21..8642a441a 100644\\n--- a/src/accelerate/utils/launch.py\\n+++ b/src/accelerate/utils/launch.py\\n@@ -52,6 +52,16 @@ def _filter_args(args):\\n return new_args\\n \\n \\n+def env_var_path_add(env_var_name, path_to_add):\\n+ \"\"\"\\n+ Extends a path-based environment variable\\'s value with a new path and returns the updated value. 
It\\'s up to the\\n+ caller to set it in os.environ.\\n+ \"\"\"\\n+ paths = [p for p in os.environ.get(env_var_name, \"\").split(\":\") if len(p) > 0]\\n+ paths.append(str(path_to_add))\\n+ return \":\".join(paths)\\n+\\n+\\n class PrepareForLaunch:\\n \"\"\"\\n Prepare a function that will launched in a distributed setup.\\n',\n", - " 'code_comments': [{'body': \"Let's just write the three lines of code instead of adding a nested function? It will be shorter and as easy to read.\",\n", - " 'diff_hunk': '@@ -573,7 +573,13 @@ def deepspeed_launcher(args):\\n warnings.warn(\\'--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.\\', DeprecationWarning)\\n mixed_precision = \"fp16\"\\n \\n- current_env[\"PYTHONPATH\"] = sys.executable\\n+ def env_path_add(env_var_name, new_path):\\n+ \"\"\"extend path-based env variable with a new path\"\"\"\\n+ paths = [p for p in os.environ.get(env_var_name, \"\").split(\":\") if len(p) > 0]\\n+ paths.append(str(new_path))\\n+ return \":\".join(paths)\\n+\\n+ current_env[\"PYTHONPATH\"] = env_path_add(\"PYTHONPATH\", sys.executable)',\n", - " 'from_author': False},\n", - " {'body': \"as it's likely that at some point you might want to extend other path-based env vars like `PATH` or `LD_LIBRARY_PATH` - one more option is to move it to utils. \\r\\n\\r\\nbut if you prefer inlined and @pacman100 is an agreement I will rewrite.\",\n", - " 'diff_hunk': '@@ -573,7 +573,13 @@ def deepspeed_launcher(args):\\n warnings.warn(\\'--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.\\', DeprecationWarning)\\n mixed_precision = \"fp16\"\\n \\n- current_env[\"PYTHONPATH\"] = sys.executable\\n+ def env_path_add(env_var_name, new_path):\\n+ \"\"\"extend path-based env variable with a new path\"\"\"\\n+ paths = [p for p in os.environ.get(env_var_name, \"\").split(\":\") if len(p) > 0]\\n+ paths.append(str(new_path))\\n+ return \":\".join(paths)\\n+\\n+ current_env[\"PYTHONPATH\"] = env_path_add(\"PYTHONPATH\", sys.executable)',\n", - " 'from_author': True},\n", - " {'body': \"I'm fine having it as a util too, just don't like the nested function approach where it can't be reused.\",\n", - " 'diff_hunk': '@@ -573,7 +573,13 @@ def deepspeed_launcher(args):\\n warnings.warn(\\'--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.\\', DeprecationWarning)\\n mixed_precision = \"fp16\"\\n \\n- current_env[\"PYTHONPATH\"] = sys.executable\\n+ def env_path_add(env_var_name, new_path):\\n+ \"\"\"extend path-based env variable with a new path\"\"\"\\n+ paths = [p for p in os.environ.get(env_var_name, \"\").split(\":\") if len(p) > 0]\\n+ paths.append(str(new_path))\\n+ return \":\".join(paths)\\n+\\n+ current_env[\"PYTHONPATH\"] = env_path_add(\"PYTHONPATH\", sys.executable)',\n", - " 'from_author': False},\n", - " {'body': \"pushed the change to move to utils, renamed the helper for better clarity now that it's far away from definition.\",\n", - " 'diff_hunk': '@@ -573,7 +573,13 @@ def deepspeed_launcher(args):\\n warnings.warn(\\'--fp16 flag is deprecated. 
Use \"--mixed_precision fp16\" instead.\\', DeprecationWarning)\\n mixed_precision = \"fp16\"\\n \\n- current_env[\"PYTHONPATH\"] = sys.executable\\n+ def env_path_add(env_var_name, new_path):\\n+ \"\"\"extend path-based env variable with a new path\"\"\"\\n+ paths = [p for p in os.environ.get(env_var_name, \"\").split(\":\") if len(p) > 0]\\n+ paths.append(str(new_path))\\n+ return \":\".join(paths)\\n+\\n+ current_env[\"PYTHONPATH\"] = env_path_add(\"PYTHONPATH\", sys.executable)',\n", - " 'from_author': True},\n", - " {'body': '```suggestion\\r\\n current_env[\"PYTHONPATH\"] = env_var_path_add(\"PYTHONPATH\", os.path.abspath(\\'.\\'))\\r\\n```',\n", - " 'diff_hunk': '@@ -573,7 +574,7 @@ def deepspeed_launcher(args):\\n warnings.warn(\\'--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.\\', DeprecationWarning)\\n mixed_precision = \"fp16\"\\n \\n- current_env[\"PYTHONPATH\"] = sys.executable\\n+ current_env[\"PYTHONPATH\"] = env_var_path_add(\"PYTHONPATH\", sys.executable)',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'Oops, Thanks a lot for pointing this out! Post the above PR, a sample run is shown below:\\r\\n```\\r\\n>>> export PYTHONPATH=\"sample_dir/fancy_library\"\\r\\n...\\r\\n\\r\\n>>> accelerate launch --config_file ds_mn_hf.yaml accelerate/examples/complete_nlp_example.py --mixed_precision \"fp16\"\\r\\n[2022-10-06 12:00:16,739] [INFO] [runner.py:504:main] cmd = pdsh -S -f 1024 -w localhost,sourab_huggingface_co@sourab-vm-image-1 \\r\\nexport PYTHONPATH=sample_dir/fancy_library:/home/sourab_huggingface_co;\\r\\n\\r\\n...\\r\\n\\r\\nexport MIXED_PRECISION=fp16; export USE_DEEPSPEED=true; export DEEPSPEED_ZERO_STAGE=2; export GRADIENT_ACCUMULATION_STEPS=1; export GRADIENT_CLIPPING=1.0; export DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE=none; export DEEPSPEED_OFFLOAD_PARAM_DEVICE=none; export DEEPSPEED_ZERO3_INIT=false; export DEEPSPEED_ZERO3_SAVE_16BIT_MODEL=none; cd /home/sourab_huggingface_co; /opt/conda/envs/ml/bin/python3.9 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMF0sICJzb3VyYWJfaHVnZ2luZ2ZhY2VfY29Ac291cmFiLXZtLWltYWdlLTEiOiBbMF19 --node_rank=%n --master_addr=xx.xxx.x.x --master_port=29500 --no_local_rank accelerate/examples/complete_nlp_example.py --mixed_precision \\'fp16\\'\\r\\nlocalhost: [2022-10-06 12:00:22,720] [INFO] [launch.py:136:main] WORLD INFO DICT: {\\'localhost\\': [0], \\'sourab_huggingface_co@sourab-vm-image-1\\': [0]}\\r\\nlocalhost: [2022-10-06 12:00:22,720] [INFO] [launch.py:142:main] nnodes=2, num_local_procs=1, node_rank=0\\r\\nlocalhost: [2022-10-06 12:00:22,720] [INFO] [launch.py:155:main] global_rank_mapping=defaultdict(, {\\'localhost\\': [0], \\'sourab_huggingface_co@sourab-vm-image-1\\': [1]})\\r\\nlocalhost: [2022-10-06 12:00:22,720] [INFO] [launch.py:156:main] dist_world_size=2\\r\\nlocalhost: [2022-10-06 12:00:22,720] [INFO] [launch.py:158:main] Setting CUDA_VISIBLE_DEVICES=0\\r\\nsourab-vm-image-1: [2022-10-06 12:00:23,404] [INFO] [launch.py:136:main] WORLD INFO DICT: {\\'localhost\\': [0], \\'sourab_huggingface_co@sourab-vm-image-1\\': [0]}\\r\\nsourab-vm-image-1: [2022-10-06 12:00:23,404] [INFO] [launch.py:142:main] nnodes=2, num_local_procs=1, node_rank=1\\r\\nsourab-vm-image-1: [2022-10-06 12:00:23,404] [INFO] [launch.py:155:main] global_rank_mapping=defaultdict(, {\\'localhost\\': [0], \\'sourab_huggingface_co@sourab-vm-image-1\\': [1]})\\r\\nsourab-vm-image-1: 
[2022-10-06 12:00:23,404] [INFO] [launch.py:156:main] dist_world_size=2\\r\\nsourab-vm-image-1: [2022-10-06 12:00:23,404] [INFO] [launch.py:158:main] Setting CUDA_VISIBLE_DEVICES=0\\r\\nlocalhost: [2022-10-06 12:00:27,932] [INFO] [comm.py:633:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl\\r\\n\\r\\n...\\r\\n\\r\\nlocalhost: [2022-10-06 12:01:10,571] [INFO] [config.py:976:print_user_config] json = {\\r\\nlocalhost: \"train_batch_size\": 32, \\r\\nlocalhost: \"train_micro_batch_size_per_gpu\": 16, \\r\\nlocalhost: \"gradient_accumulation_steps\": 1, \\r\\nlocalhost: \"zero_optimization\": {\\r\\nlocalhost: \"stage\": 2, \\r\\nlocalhost: \"offload_optimizer\": {\\r\\nlocalhost: \"device\": \"none\"\\r\\nlocalhost: }, \\r\\nlocalhost: \"offload_param\": {\\r\\nlocalhost: \"device\": \"none\"\\r\\nlocalhost: }, \\r\\nlocalhost: \"stage3_gather_16bit_weights_on_model_save\": false\\r\\nlocalhost: }, \\r\\nlocalhost: \"gradient_clipping\": 1.0, \\r\\nlocalhost: \"steps_per_print\": inf, \\r\\nlocalhost: \"fp16\": {\\r\\nlocalhost: \"enabled\": true, \\r\\nlocalhost: \"auto_cast\": true\\r\\nlocalhost: }, \\r\\nlocalhost: \"zero_allow_untested_optimizer\": true\\r\\nlocalhost: }\\r\\nlocalhost: Using /home/sourab_huggingface_co/.cache/torch_extensions/py39_cu113 as PyTorch extensions root...\\r\\n\\r\\n...\\r\\n\\r\\nlocalhost: epoch 0: {\\'accuracy\\': 0.7303921568627451, \\'f1\\': 0.8318042813455658}\\r\\nsourab-vm-image-1: epoch 0: {\\'accuracy\\': 0.7303921568627451, \\'f1\\': 0.8318042813455658}\\r\\nlocalhost: epoch 1: {\\'accuracy\\': 0.8308823529411765, \\'f1\\': 0.880415944540728}\\r\\nsourab-vm-image-1: epoch 1: {\\'accuracy\\': 0.8308823529411765, \\'f1\\': 0.880415944540728}\\r\\nlocalhost: epoch 2: {\\'accuracy\\': 0.8333333333333334, \\'f1\\': 0.8811188811188811}\\r\\nsourab-vm-image-1: epoch 2: {\\'accuracy\\': 0.8333333333333334, \\'f1\\': 0.8811188811188811}\\r\\n\\r\\n```',\n", - " 'from_author': False},\n", - " {'body': \"@pacman100, please feel free to merge when you feel it's ready. Thanks.\",\n", - " 'from_author': True}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/741'},\n", - " 1076433990: {'diff': 'diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\\nindex 5d8ff0c1b..593eec741 100644\\n--- a/.github/workflows/test.yml\\n+++ b/.github/workflows/test.yml\\n@@ -6,6 +6,7 @@ on:\\n - \"src/**\"\\n - \"tests/**\"\\n - \".github/**\"\\n+ - \"examples/**\"\\n - \"setup.py\"\\n types: [opened, synchronize, reopened]\\n \\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/740'},\n", - " 1076432519: {'diff': 'diff --git a/docs/source/usage_guides/memory.mdx b/docs/source/usage_guides/memory.mdx\\nindex 8841cd584..213a2f670 100644\\n--- a/docs/source/usage_guides/memory.mdx\\n+++ b/docs/source/usage_guides/memory.mdx\\n@@ -25,16 +25,20 @@ training script. To use it, restructure your training function to include an inn\\n and build your dataloaders inside it. At a minimum, this could look like 4 new lines of code. \\n > Note: The inner function *must* take in the batch size as the first parameter, but we do not pass one to it when called. 
The wrapper handles this for us\\n \\n+It should also be noted that anything which will consume CUDA memory and passed to the `accelerator` **must** be declared inside the inner function,\\n+such as models and optimizers.\\n+\\n ```diff\\n def training_function(args):\\n accelerator = Accelerator()\\n- model = get_model()\\n- model.to(accelerator.device)\\n- optimizer = get_optimizer()\\n \\n + @find_executable_batch_size(starting_batch_size=args.batch_size)\\n + def inner_training_loop(batch_size):\\n-+ nonlocal model, optimizer # Ensure they can be used in our context\\n++ nonlocal accelerator # Ensure they can be used in our context\\n++ accelerator.free_memory() # Free all lingering references\\n+ model = get_model()\\n+ model.to(accelerator.device)\\n+ optimizer = get_optimizer()\\n train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\\n lr_scheduler = get_scheduler(\\n optimizer, \\ndiff --git a/examples/by_feature/memory.py b/examples/by_feature/memory.py\\nindex 82dc34b8a..684a32853 100644\\n--- a/examples/by_feature/memory.py\\n+++ b/examples/by_feature/memory.py\\n@@ -122,24 +122,6 @@ def training_function(config, args):\\n \\n metric = evaluate.load(\"glue\", \"mrpc\")\\n \\n- # If the batch size is too big we use gradient accumulation\\n- gradient_accumulation_steps = 1\\n- if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:\\n- gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\\n- batch_size = MAX_GPU_BATCH_SIZE\\n-\\n- set_seed(seed)\\n- # Instantiate the model (we build the model here so that the seed also control new weights initialization)\\n- model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\\n-\\n- # We could avoid this line since the accelerator is set with `device_placement=True` (default value).\\n- # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\\n- # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\\n- model = model.to(accelerator.device)\\n-\\n- # Instantiate optimizer\\n- optimizer = AdamW(params=model.parameters(), lr=lr)\\n-\\n # New Code #\\n # We now can define an inner training loop function. 
It should take a batch size as the only parameter,\\n # and build the dataloaders in there.\\n@@ -147,16 +129,31 @@ def training_function(config, args):\\n @find_executable_batch_size(starting_batch_size=batch_size)\\n def inner_training_loop(batch_size):\\n # And now just move everything below under this function\\n- # Ensure that anything declared outside this function is set as `nonlocal`\\n- # so it is in scope\\n- nonlocal model, optimizer\\n+ # We need to bring in the Accelerator object from earlier\\n+ nonlocal accelerator\\n+ # And reset all of its attributes that could hold onto any memory:\\n+ accelerator.free_memory()\\n+\\n+ # Then we can declare the model, optimizer, and everything else:\\n+ set_seed(seed)\\n+\\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\\n+ model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\\n+\\n+ # We could avoid this line since the accelerator is set with `device_placement=True` (default value).\\n+ # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\\n+ # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\\n+ model = model.to(accelerator.device)\\n+\\n+ # Instantiate optimizer\\n+ optimizer = AdamW(params=model.parameters(), lr=lr)\\n train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\\n \\n # Instantiate scheduler\\n lr_scheduler = get_linear_schedule_with_warmup(\\n optimizer=optimizer,\\n num_warmup_steps=100,\\n- num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,\\n+ num_training_steps=(len(train_dataloader) * num_epochs),\\n )\\n \\n # Prepare everything\\n@@ -174,12 +171,10 @@ def inner_training_loop(batch_size):\\n batch.to(accelerator.device)\\n outputs = model(**batch)\\n loss = outputs.loss\\n- loss = loss / gradient_accumulation_steps\\n accelerator.backward(loss)\\n- if step % gradient_accumulation_steps == 0:\\n- optimizer.step()\\n- lr_scheduler.step()\\n- optimizer.zero_grad()\\n+ optimizer.step()\\n+ lr_scheduler.step()\\n+ optimizer.zero_grad()\\n \\n model.eval()\\n for step, batch in enumerate(eval_dataloader):\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/739'},\n", - " 1076041429: {'diff': 'diff --git a/docs/source/usage_guides/training_zoo.mdx b/docs/source/usage_guides/training_zoo.mdx\\nindex d0510bf93..b3fff25e3 100644\\n--- a/docs/source/usage_guides/training_zoo.mdx\\n+++ b/docs/source/usage_guides/training_zoo.mdx\\n@@ -100,3 +100,7 @@ These are tutorials from libraries that integrate with 🤗 Accelerate:\\n ### Tez \\n \\n - [Leaf disease detection with Tez and Accelerate](https://www.kaggle.com/code/abhishek/tez-faster-and-easier-training-for-leaf-detection/notebook)\\n+\\n+### trlx \\n+\\n+- [How to implement a sentiment learning task with trlx](https://github.com/CarperAI/trlx#example-how-to-add-a-task)\\n\\\\ No newline at end of file\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/738'},\n", - " 1075007649: {'diff': 'diff --git 
a/docs/source/_toctree.yml b/docs/source/_toctree.yml\\nindex 84c4e03f6..559c798c5 100644\\n--- a/docs/source/_toctree.yml\\n+++ b/docs/source/_toctree.yml\\n@@ -32,9 +32,11 @@\\n - local: usage_guides/memory\\n title: How to avoid CUDA Out-of-Memory\\n - local: usage_guides/sagemaker\\n- title: Using Accelerate on SageMaker\\n+ title: Using 🤗 Accelerate on SageMaker\\n - local: usage_guides/mps\\n title: How to use Apple Silicon M1 GPUs\\n+ - local: usage_guides/training_zoo\\n+ title: 🤗 Accelerate Example Zoo\\n title: How-To Guides\\n - sections:\\n - local: concept_guides/gradient_synchronization\\ndiff --git a/docs/source/usage_guides/training_zoo.mdx b/docs/source/usage_guides/training_zoo.mdx\\nnew file mode 100644\\nindex 000000000..d0510bf93\\n--- /dev/null\\n+++ b/docs/source/usage_guides/training_zoo.mdx\\n@@ -0,0 +1,102 @@\\n+\\n+\\n+# Example Zoo\\n+\\n+Below contains a non-exhuastive list of tutorials and scripts showcasing Accelerate\\n+\\n+## Official Accelerate Examples:\\n+\\n+### Basic Examples\\n+\\n+These examples showcase the base features of Accelerate and are a great starting point\\n+\\n+- [Barebones NLP example](https://github.com/huggingface/accelerate/blob/main/examples/nlp_example.py)\\n+- [Barebones computer vision example](https://github.com/huggingface/accelerate/blob/main/examples/cv_example.py)\\n+\\n+### Feature Specific Examples\\n+\\n+These examples showcase specific features that the Accelerate framework offers\\n+\\n+- [Checkpointing states](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/checkpointing.py)\\n+- [Cross validation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/cross_validation.py)\\n+- [DeepSpeed](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/deepspeed_with_config_support.py)\\n+- [Fully Sharded Data Parallelism](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/fsdp_with_peak_mem_tracking.py)\\n+- [Gradient accumulation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/gradient_accumulation.py)\\n+- [Memory-aware batch size finder](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/memory.py)\\n+- [Metric Computation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/multi_process_metrics.py)\\n+- [Using Trackers](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/tracking.py)\\n+\\n+### Full Examples \\n+\\n+These examples showcase every feature in Accelerate at once that was shown in \"Feature Specific Examples\"\\n+\\n+- [Complete NLP example](https://github.com/huggingface/accelerate/blob/main/examples/complete_nlp_example.py)\\n+- [Complete computer vision example](https://github.com/huggingface/accelerate/blob/main/examples/complete_cv_example.py)\\n+- [Causal language model fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_clm_no_trainer.py)\\n+- [Masked language model fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm_no_trainer.py)\\n+- [Speech pretraining example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/speech-pretraining/run_wav2vec2_pretraining_no_trainer.py)\\n+- [Translation fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/translation/run_translation_no_trainer.py)\\n+- [Text classification fine-tuning 
example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue_no_trainer.py)\\n+- [Semantic segmentation fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py)\\n+- [Question answering fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_qa_no_trainer.py)\\n+- [Beam search question answering fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py)\\n+- [Multiple choice question answering fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/multiple-choice/run_swag_no_trainer.py)\\n+- [Named entity recognition fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/token-classification/run_ner_no_trainer.py)\\n+- [Image classification fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/image-classification/run_image_classification_no_trainer.py)\\n+- [Summarization fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/summarization/run_summarization_no_trainer.py)\\n+\\n+## Integration Examples \\n+\\n+These are tutorials from libraries that integrate with 🤗 Accelerate: \\n+\\n+### Catalyst\\n+\\n+- [Distributed training tutorial with Catalyst](https://catalyst-team.github.io/catalyst/tutorials/ddp.html)\\n+\\n+### DALLE2-pytorch \\n+\\n+- [Fine-tuning DALLE2](https://github.com/lucidrains/DALLE2-pytorch#usage)\\n+\\n+### 🤗 diffusers\\n+\\n+- [Performing textual inversion with diffusers](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion)\\n+- [Training DreamBooth with diffusers](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth)\\n+\\n+### fastai \\n+\\n+- [Distributed training from Jupyter Notebooks with fastai](https://docs.fast.ai/tutorial.distributed.html)\\n+- [Basic distributed training examples with fastai](https://docs.fast.ai/examples/distributed_app_examples.html)\\n+\\n+### GradsFlow\\n+\\n+- [Auto Image Classification with GradsFlow](https://docs.gradsflow.com/en/latest/examples/nbs/01-ImageClassification/)\\n+\\n+### imagen-pytorch \\n+\\n+- [Fine-tuning Imagen](https://github.com/lucidrains/imagen-pytorch#usage)\\n+\\n+### Kornia\\n+\\n+- [Fine-tuning vision models with Kornia\\'s Trainer](https://kornia.readthedocs.io/en/latest/get-started/training.html)\\n+\\n+### PyTorch Accelerated \\n+\\n+- [Quickstart distributed training tutorial with PyTorch Accelerated](https://pytorch-accelerated.readthedocs.io/en/latest/quickstart.html)\\n+\\n+### PyTorch3D\\n+\\n+- [Perform Deep Learning with 3D data](https://pytorch3d.org/tutorials/)\\n+\\n+### Tez \\n+\\n+- [Leaf disease detection with Tez and Accelerate](https://www.kaggle.com/code/abhishek/tez-faster-and-easier-training-for-leaf-detection/notebook)\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/737'},\n", - " 1074942681: {'diff': 'diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml\\nindex 69a101307..3ed39ac4b 100644\\n--- a/.github/ISSUE_TEMPLATE/bug-report.yml\\n+++ 
b/.github/ISSUE_TEMPLATE/bug-report.yml\\n@@ -1,6 +1,5 @@\\n name: \"\\\\U0001F41B Bug Report\"\\n description: Submit a bug report to help us improve Accelerate\\n-labels: [ \"bug\" ]\\n body:\\n - type: textarea\\n id: system-info\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/735'},\n", - " 1074934430: {'diff': 'diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml\\nindex 84c4e03f6..ab24fe8e5 100644\\n--- a/docs/source/_toctree.yml\\n+++ b/docs/source/_toctree.yml\\n@@ -37,6 +37,8 @@\\n title: How to use Apple Silicon M1 GPUs\\n title: How-To Guides\\n - sections:\\n+ - local: concept_guides/performance\\n+ title: Comparing performance across distributed setups\\n - local: concept_guides/gradient_synchronization\\n title: Gradient synchronization\\n - local: concept_guides/deferring_execution\\ndiff --git a/docs/source/concept_guides/deferring_execution.mdx b/docs/source/concept_guides/deferring_execution.mdx\\nindex 4297e2567..cb80ee0da 100644\\n--- a/docs/source/concept_guides/deferring_execution.mdx\\n+++ b/docs/source/concept_guides/deferring_execution.mdx\\n@@ -1,3 +1,15 @@\\n+\\n+\\n # Deferring Executions\\n \\n When you run your usual script, instructions are executed in order. Using 🤗 Accelerate to deploy your script on several\\ndiff --git a/docs/source/concept_guides/performance.mdx b/docs/source/concept_guides/performance.mdx\\nnew file mode 100644\\nindex 000000000..c974b322f\\n--- /dev/null\\n+++ b/docs/source/concept_guides/performance.mdx\\n@@ -0,0 +1,91 @@\\n+\\n+\\n+# Comparing performance between different device setups\\n+\\n+Evaluating and comparing the performance from different setups can be quite tricky if you don\\'t know what to look for.\\n+For example, you cannot run the same script with the same batch size across TPU, multi-GPU, and single-GPU with Accelerate \\n+and expect your results to line up. \\n+\\n+But why?\\n+\\n+There\\'s three reasons for this that this tutorial will cover: \\n+\\n+1. **Setting the right seeds**\\n+2. **Observed Batch Sizes**\\n+3. **Learning Rates**\\n+\\n+## Setting the Seed \\n+\\n+While this issue has not come up as much, make sure to use [`utils.set_seed`] to fully set the seed in all distributed cases so training will be reproducable:\\n+\\n+```python\\n+from accelerate import set_seed\\n+\\n+set_seed(42)\\n+```\\n+\\n+Why is this important? Under the hood this will set **5** different seed settings:\\n+\\n+```python\\n+ random.seed(seed)\\n+ np.random.seed(seed)\\n+ torch.manual_seed(seed)\\n+ torch.cuda.manual_seed_all(seed)\\n+ # ^^ safe to call this function even if cuda is not available\\n+ if is_tpu_available():\\n+ xm.set_rng_state(seed)\\n+```\\n+\\n+The random state, numpy\\'s state, torch, torch\\'s cuda state, and if TPUs are available torch_xla\\'s cuda state.\\n+\\n+## Observed Batch Sizes \\n+\\n+When training with Accelerate, the batch size passed to the dataloader is the **batch size per GPU**. What this entails is \\n+a batch size of 64 on two GPUs is truly a batch size of 128. As a result, when testing on a single GPU this needs to be accounted for,\\n+as well as similarly for TPUs. 
\\n+\\n+The below table can be used as a quick reference to try out different batch sizes:\\n+\\n+\\n+\\n+In this example there are two GPUs for \"Multi-GPU\" and a TPU pod with 8 workers\\n+\\n+\\n+\\n+| Single GPU Batch Size | Multi-GPU Equivalent Batch Size | TPU Equivalent Batch Size |\\n+|-----------------------|---------------------------------|---------------------------|\\n+| 256 | 128 | 32 |\\n+| 128 | 64 | 16 |\\n+| 64 | 32 | 8 |\\n+| 32 | 16 | 4 |\\n+\\n+## Learning Rates \\n+\\n+As noted in multiple sources[[1](https://aws.amazon.com/blogs/machine-learning/scalable-multi-node-deep-learning-training-using-gpus-in-the-aws-cloud/)][[2](https://docs.nvidia.com/clara/tlt-mi_archive/clara-train-sdk-v2.0/nvmidl/appendix/training_with_multiple_gpus.html)], the learning rate should be scaled *linearly* based on the number of devices present. The below \\n+snippet shows doing so with Accelerate:\\n+\\n+\\n+\\n+Since users can have their own learning rate schedulers defined, we leave this up to the user to decide if they wish to scale their \\n+learning rate or not.\\n+ \\n+\\n+\\n+```python\\n+learning_rate = 1e-3\\n+accelerator = Accelerator()\\n+learning_rate *= accelerator.num_processes\\n+\\n+optimizer = AdamW(params=model.parameters(), lr=learning_rate)\\n+```\\n+\\n',\n", - " 'code_comments': [{'body': '```suggestion\\r\\n\\r\\n\\r\\nIn this example there are two GPUs for \"Multi-GPU\" and a TPU pod with 8 workers\\r\\n\\r\\n\\r\\n```',\n", - " 'diff_hunk': '@@ -0,0 +1,87 @@\\n+\\n+\\n+# Comparing performance between different device setups\\n+\\n+Evaluating and comparing the performance from different setups can be quite tricky if you don\\'t know what to look for.\\n+For example, you cannot run the same script with the same batch size across TPU, multi-GPU, and single-GPU with Accelerate \\n+and expect your results to line up. \\n+\\n+But why?\\n+\\n+There\\'s three reasons for this that this tutorial will cover: \\n+\\n+1. **Setting the right seeds**\\n+2. **Observed Batch Sizes**\\n+3. **Learning Rates**\\n+\\n+## Setting the Seed \\n+\\n+While this issue has not come up as much, make sure to use [`utils.set_seed`] to fully set the seed in all distributed cases so training will be reproducable:\\n+\\n+```python\\n+from accelerate import set_seed\\n+\\n+set_seed(42)\\n+```\\n+\\n+Why is this important? Under the hood this will set **5** different seed settings:\\n+\\n+```python\\n+ random.seed(seed)\\n+ np.random.seed(seed)\\n+ torch.manual_seed(seed)\\n+ torch.cuda.manual_seed_all(seed)\\n+ # ^^ safe to call this function even if cuda is not available\\n+ if is_tpu_available():\\n+ xm.set_rng_state(seed)\\n+```\\n+\\n+The random state, numpy\\'s state, torch, torch\\'s cuda state, and if TPUs are available torch_xla\\'s cuda state.\\n+\\n+## Observed Batch Sizes \\n+\\n+When training with Accelerate, the batch size passed to the dataloader is the **batch size per GPU**. What this entails is \\n+a batch size of 64 on two GPUs is truly a batch size of 128. As a result, when testing on a single GPU this needs to be accounted for,\\n+as well as similarly for TPUs. 
\\n+\\n+The below table can be used as a quick reference to try out different batch sizes:\\n+\\n+\\n+ In this example there are two GPUs for \"Multi-GPU\" and a TPU pod with 8 workers\\n+',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\n\\r\\n\\r\\nSince users can have their own learning rate schedulers defined, we leave this up to the user to decide if they wish to scale their \\r\\nlearning rate or not.\\r\\n \\r\\n\\r\\n```',\n", - " 'diff_hunk': '@@ -0,0 +1,87 @@\\n+\\n+\\n+# Comparing performance between different device setups\\n+\\n+Evaluating and comparing the performance from different setups can be quite tricky if you don\\'t know what to look for.\\n+For example, you cannot run the same script with the same batch size across TPU, multi-GPU, and single-GPU with Accelerate \\n+and expect your results to line up. \\n+\\n+But why?\\n+\\n+There\\'s three reasons for this that this tutorial will cover: \\n+\\n+1. **Setting the right seeds**\\n+2. **Observed Batch Sizes**\\n+3. **Learning Rates**\\n+\\n+## Setting the Seed \\n+\\n+While this issue has not come up as much, make sure to use [`utils.set_seed`] to fully set the seed in all distributed cases so training will be reproducable:\\n+\\n+```python\\n+from accelerate import set_seed\\n+\\n+set_seed(42)\\n+```\\n+\\n+Why is this important? Under the hood this will set **5** different seed settings:\\n+\\n+```python\\n+ random.seed(seed)\\n+ np.random.seed(seed)\\n+ torch.manual_seed(seed)\\n+ torch.cuda.manual_seed_all(seed)\\n+ # ^^ safe to call this function even if cuda is not available\\n+ if is_tpu_available():\\n+ xm.set_rng_state(seed)\\n+```\\n+\\n+The random state, numpy\\'s state, torch, torch\\'s cuda state, and if TPUs are available torch_xla\\'s cuda state.\\n+\\n+## Observed Batch Sizes \\n+\\n+When training with Accelerate, the batch size passed to the dataloader is the **batch size per GPU**. What this entails is \\n+a batch size of 64 on two GPUs is truly a batch size of 128. As a result, when testing on a single GPU this needs to be accounted for,\\n+as well as similarly for TPUs. \\n+\\n+The below table can be used as a quick reference to try out different batch sizes:\\n+\\n+\\n+ In this example there are two GPUs for \"Multi-GPU\" and a TPU pod with 8 workers\\n+\\n+\\n+| Single GPU Batch Size | Multi-GPU Equivalent Batch Size | TPU Equivalent Batch Size |\\n+|-----------------------|---------------------------------|---------------------------|\\n+| 256 | 128 | 32 |\\n+| 128 | 64 | 16 |\\n+| 64 | 32 | 8 |\\n+| 32 | 16 | 4 |\\n+\\n+## Learning Rates \\n+\\n+As noted in multiple sources[[1](https://aws.amazon.com/blogs/machine-learning/scalable-multi-node-deep-learning-training-using-gpus-in-the-aws-cloud/)][[2](https://docs.nvidia.com/clara/tlt-mi_archive/clara-train-sdk-v2.0/nvmidl/appendix/training_with_multiple_gpus.html)], the learning rate should be scaled *linearly* based on the number of devices present. 
The below \\n+snippet shows doing so with Accelerate:\\n+\\n+\\n+ Since users can have their own learning rate schedulers defined, we leave this up to the user to decide if they wish to scale their \\n+ learning rate or not.\\n+',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/734'},\n", - " 1072847345: {'diff': 'diff --git a/docs/source/package_reference/cli.mdx b/docs/source/package_reference/cli.mdx\\nindex 092b72850..a9e955d43 100644\\n--- a/docs/source/package_reference/cli.mdx\\n+++ b/docs/source/package_reference/cli.mdx\\n@@ -94,6 +94,7 @@ The following arguments are useful for customization of worker machines\\n * `--machine_rank MACHINE_RANK` (`int`) -- The rank of the machine on which this script is launched.\\n * `--num_machines NUM_MACHINES` (`int`) -- The total number of machines used in this training.\\n * `--num_processes NUM_PROCESSES` (`int`) -- The total number of processes to be launched in parallel.\\n+* `--gpu_ids` (`str`) -- What GPUs (by id) should be used for training on this machine as a comma-seperated list\\n * `--main_process_ip MAIN_PROCESS_IP` (`str`) -- The IP address of the machine of rank 0.\\n * `--main_process_port MAIN_PROCESS_PORT` (`int`) -- The port to use to communicate with the machine of rank 0.\\n * `--num_cpu_threads_per_process NUM_CPU_THREADS_PER_PROCESS` (`int`) -- The number of CPU threads per process. Can be tuned for optimal performance.\\ndiff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\\nindex a174115b1..01c419f4f 100644\\n--- a/src/accelerate/commands/config/cluster.py\\n+++ b/src/accelerate/commands/config/cluster.py\\n@@ -35,6 +35,7 @@ def get_cluster_input():\\n \\n machine_rank = 0\\n num_machines = 1\\n+ gpu_ids = None\\n main_process_ip = None\\n main_process_port = None\\n rdzv_backend = \"static\"\\n@@ -293,6 +294,12 @@ def get_cluster_input():\\n default=1,\\n error_message=\"Please enter an integer.\",\\n )\\n+\\n+ if distributed_type in [DistributedType.MULTI_GPU, DistributedType.NO] and not use_cpu:\\n+ gpu_ids = _ask_field(\\n+ \"What GPU(s) (by id) should be used for training on this machine as a comma-seperated list? [all]:\",\\n+ default=\"all\",\\n+ )\\n elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED]:\\n num_processes = _ask_field(\\n \"How many GPU(s) should be used for distributed training? 
[1]:\",\\n@@ -325,6 +332,7 @@ def get_cluster_input():\\n compute_environment=ComputeEnvironment.LOCAL_MACHINE,\\n distributed_type=distributed_type,\\n num_processes=num_processes,\\n+ gpu_ids=gpu_ids,\\n mixed_precision=mixed_precision,\\n downcast_bf16=downcast_bf16,\\n machine_rank=machine_rank,\\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\\nindex 68da64963..43faf45ca 100644\\n--- a/src/accelerate/commands/config/config_args.py\\n+++ b/src/accelerate/commands/config/config_args.py\\n@@ -135,6 +135,7 @@ class ClusterConfig(BaseConfig):\\n num_processes: int\\n machine_rank: int = 0\\n num_machines: int = 1\\n+ gpu_ids: Optional[str] = None\\n main_process_ip: Optional[str] = None\\n main_process_port: Optional[int] = None\\n rdzv_backend: Optional[str] = \"static\"\\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex d1905d1c0..982293a33 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -259,6 +259,11 @@ def launch_command_parser(subparsers=None):\\n parser.add_argument(\\n \"--num_machines\", type=int, default=None, help=\"The total number of machines used in this training.\"\\n )\\n+ parser.add_argument(\\n+ \"--gpu_ids\",\\n+ default=None,\\n+ help=\"What GPUs (by id) should be used for training on this machine as a comma-seperated list\",\\n+ )\\n parser.add_argument(\\n \"--machine_rank\", type=int, default=None, help=\"The rank of the machine on which this script is launched.\"\\n )\\n@@ -366,6 +371,8 @@ def simple_launcher(args):\\n current_env[\"USE_MPS_DEVICE\"] = str(args.use_mps_device)\\n if args.use_mps_device:\\n current_env[\"PYTORCH_ENABLE_MPS_FALLBACK\"] = \"1\"\\n+ elif args.gpu_ids != \"all\":\\n+ current_env[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_ids\\n if args.num_machines > 1:\\n current_env[\"MASTER_ADDR\"] = args.main_process_ip\\n current_env[\"MASTER_PORT\"] = str(args.main_process_port)\\n@@ -420,6 +427,9 @@ def multi_gpu_launcher(args):\\n setattr(args, \"no_python\", True)\\n \\n current_env = os.environ.copy()\\n+ gpu_ids = getattr(args, \"gpu_ids\")\\n+ if gpu_ids != \"all\":\\n+ current_env[\"CUDA_VISIBLE_DEVICES\"] = gpu_ids\\n mixed_precision = args.mixed_precision.lower()\\n try:\\n mixed_precision = PrecisionType(mixed_precision)\\n@@ -549,6 +559,9 @@ def deepspeed_launcher(args):\\n setattr(args, \"no_python\", True)\\n \\n current_env = os.environ.copy()\\n+ gpu_ids = getattr(args, \"gpu_ids\")\\n+ if gpu_ids != \"all\":\\n+ current_env[\"CUDA_VISIBLE_DEVICES\"] = gpu_ids\\n try:\\n mixed_precision = PrecisionType(args.mixed_precision.lower())\\n except ValueError:\\n@@ -817,6 +830,14 @@ def launch_command(args):\\n args.tpu = defaults.distributed_type == DistributedType.TPU\\n args.use_fsdp = defaults.distributed_type == DistributedType.FSDP\\n args.use_mps_device = defaults.distributed_type == DistributedType.MPS\\n+ if not args.use_mps_device:\\n+ if args.gpu_ids is None:\\n+ if defaults.gpu_ids is not None:\\n+ args.gpu_ids = defaults.gpu_ids\\n+ else:\\n+ args.gpu_ids = \"all\"\\n+ if len(args.gpu_ids.split(\",\")) < 2 and args.multi_gpu and (args.gpu_ids != \"all\"):\\n+ args.multi_gpu = False\\n if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE:\\n # Update args with the defaults\\n for name, attr in defaults.__dict__.items():\\n',\n", - " 'code_comments': [{'body': 'I meant with respect to these changes\\n',\n", - " 'diff_hunk': '@@ -817,6 +830,13 @@ def launch_command(args):\\n 
args.tpu = defaults.distributed_type == DistributedType.TPU\\n args.use_fsdp = defaults.distributed_type == DistributedType.FSDP\\n args.use_mps_device = defaults.distributed_type == DistributedType.MPS\\n+ if args.gpu_ids is None:\\n+ if defaults.gpu_ids is not None:\\n+ args.gpu_ids = defaults.gpu_ids\\n+ else:\\n+ args.gpu_ids = \"all\"\\n+ if len(args.gpu_ids.split(\",\")) < 2 and args.multi_gpu and (args.gpu_ids != \"all\"):\\n+ args.multi_gpu = False',\n", - " 'from_author': False},\n", - " {'body': 'Let me know what you think about my solution here: https://github.com/huggingface/accelerate/pull/732/commits/0467d4ff0bacda7ffb0d6087a7ef636cfbbb4960',\n", - " 'diff_hunk': '@@ -817,6 +830,13 @@ def launch_command(args):\\n args.tpu = defaults.distributed_type == DistributedType.TPU\\n args.use_fsdp = defaults.distributed_type == DistributedType.FSDP\\n args.use_mps_device = defaults.distributed_type == DistributedType.MPS\\n+ if args.gpu_ids is None:\\n+ if defaults.gpu_ids is not None:\\n+ args.gpu_ids = defaults.gpu_ids\\n+ else:\\n+ args.gpu_ids = \"all\"\\n+ if len(args.gpu_ids.split(\",\")) < 2 and args.multi_gpu and (args.gpu_ids != \"all\"):\\n+ args.multi_gpu = False',\n", - " 'from_author': True},\n", - " {'body': 'Nice! This should have expected behaviour. 🤗',\n", - " 'diff_hunk': '@@ -817,6 +830,13 @@ def launch_command(args):\\n args.tpu = defaults.distributed_type == DistributedType.TPU\\n args.use_fsdp = defaults.distributed_type == DistributedType.FSDP\\n args.use_mps_device = defaults.distributed_type == DistributedType.MPS\\n+ if args.gpu_ids is None:\\n+ if defaults.gpu_ids is not None:\\n+ args.gpu_ids = defaults.gpu_ids\\n+ else:\\n+ args.gpu_ids = \"all\"\\n+ if len(args.gpu_ids.split(\",\")) < 2 and args.multi_gpu and (args.gpu_ids != \"all\"):\\n+ args.multi_gpu = False',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': \"@pacman100 correct, this is solely for non-M1 GPUs, I'll specify that in the info description for it\",\n", - " 'from_author': True}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/732'},\n", - " 1070510180: {'diff': 'diff --git a/src/accelerate/scheduler.py b/src/accelerate/scheduler.py\\nindex 4ae91a657..835d4e0d9 100644\\n--- a/src/accelerate/scheduler.py\\n+++ b/src/accelerate/scheduler.py\\n@@ -69,7 +69,9 @@ def step(self, *args, **kwargs):\\n num_processes = AcceleratorState().num_processes\\n for _ in range(num_processes):\\n # Special case when using OneCycle and `drop_last` was not used\\n- if getattr(self.scheduler, \"total_steps\", 0) <= self.scheduler.last_epoch:\\n+ if hasattr(self.scheduler, \"total_steps\") and self.scheduler._step_count <= self.scheduler.total_steps:\\n+ self.scheduler.step(*args, **kwargs)\\n+ else:\\n self.scheduler.step(*args, **kwargs)\\n \\n # Passthroughs\\ndiff --git a/tests/test_scheduler.py b/tests/test_scheduler.py\\nindex be4f975fb..c1ef18f1e 100644\\n--- a/tests/test_scheduler.py\\n+++ b/tests/test_scheduler.py\\n@@ -21,12 +21,30 @@\\n from accelerate.test_utils import require_cpu\\n \\n \\n-def scheduler_test(num_processes=2, step_scheduler_with_optimizer=True, split_batches=False):\\n+def one_cycle_test(num_processes=2, step_scheduler_with_optimizer=True, split_batches=False):\\n accelerator = Accelerator(step_scheduler_with_optimizer=step_scheduler_with_optimizer, split_batches=split_batches)\\n model = torch.nn.Linear(2, 4)\\n 
optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)\\n- scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda n: 1 - n / 10)\\n+ scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)\\n+ model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)\\n \\n+ # Optimizer has stepped\\n+ scheduler.step()\\n+ if step_scheduler_with_optimizer or (num_processes == 1):\\n+ assert (\\n+ scheduler.scheduler.last_epoch == num_processes\\n+ ), f\"Last Epoch ({scheduler.scheduler.last_epoch}) != Num Processes ({num_processes})\"\\n+ else:\\n+ assert (\\n+ scheduler.scheduler.last_epoch != num_processes\\n+ ), f\"Last Epoch ({scheduler.scheduler.last_epoch}) == Num Processes ({num_processes})\"\\n+\\n+\\n+def lambda_test(num_processes=2, step_scheduler_with_optimizer=True, split_batches=False):\\n+ accelerator = Accelerator(step_scheduler_with_optimizer=step_scheduler_with_optimizer, split_batches=split_batches)\\n+ model = torch.nn.Linear(2, 4)\\n+ optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)\\n+ scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda n: 1 - n / 10)\\n model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)\\n \\n # Optimizer has stepped\\n@@ -49,16 +67,30 @@ def scheduler_test(num_processes=2, step_scheduler_with_optimizer=True, split_ba\\n \\n @require_cpu\\n class SchedulerTester(unittest.TestCase):\\n- def test_scheduler_steps_with_optimizer_single_process(self):\\n- debug_launcher(partial(scheduler_test, num_processes=1), num_processes=1)\\n- debug_launcher(partial(scheduler_test, num_processes=1, split_batches=True), num_processes=1)\\n+ def test_lambda_scheduler_steps_with_optimizer_single_process(self):\\n+ debug_launcher(partial(lambda_test, num_processes=1), num_processes=1)\\n+ debug_launcher(partial(lambda_test, num_processes=1, split_batches=True), num_processes=1)\\n+\\n+ def test_one_cycle_scheduler_steps_with_optimizer_single_process(self):\\n+ debug_launcher(partial(one_cycle_test, num_processes=1), num_processes=1)\\n+ debug_launcher(partial(one_cycle_test, num_processes=1, split_batches=True), num_processes=1)\\n+\\n+ def test_lambda_scheduler_not_step_with_optimizer_single_process(self):\\n+ debug_launcher(partial(lambda_test, num_processes=1, step_scheduler_with_optimizer=False), num_processes=1)\\n+\\n+ def test_one_cycle_scheduler_not_step_with_optimizer_single_process(self):\\n+ debug_launcher(partial(one_cycle_test, num_processes=1, step_scheduler_with_optimizer=False), num_processes=1)\\n+\\n+ def test_lambda_scheduler_steps_with_optimizer_multiprocess(self):\\n+ debug_launcher(lambda_test)\\n+ debug_launcher(partial(lambda_test, num_processes=1, split_batches=True), num_processes=1)\\n \\n- def test_scheduler_not_step_with_optimizer_single_process(self):\\n- debug_launcher(partial(scheduler_test, num_processes=1, step_scheduler_with_optimizer=False), num_processes=1)\\n+ def test_one_cycle_scheduler_steps_with_optimizer_multiprocess(self):\\n+ debug_launcher(one_cycle_test)\\n+ debug_launcher(partial(one_cycle_test, num_processes=1, split_batches=True), num_processes=1)\\n \\n- def test_scheduler_steps_with_optimizer_multiprocess(self):\\n- debug_launcher(scheduler_test)\\n- debug_launcher(partial(scheduler_test, num_processes=1, split_batches=True), num_processes=1)\\n+ def test_lambda_scheduler_not_step_with_optimizer_multiprocess(self):\\n+ debug_launcher(partial(lambda_test, 
step_scheduler_with_optimizer=False))\\n \\n- def test_scheduler_not_step_with_optimizer_multiprocess(self):\\n- debug_launcher(partial(scheduler_test, step_scheduler_with_optimizer=False))\\n+ def test_one_cycle_scheduler_not_step_with_optimizer_multiprocess(self):\\n+ debug_launcher(partial(one_cycle_test, step_scheduler_with_optimizer=False))\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/728'},\n", - " 1070245844: {'diff': \"diff --git a/.github/workflows/build_and_run_tests.yml b/.github/workflows/build_and_run_tests.yml\\nindex a5378353d..abf9f3883 100644\\n--- a/.github/workflows/build_and_run_tests.yml\\n+++ b/.github/workflows/build_and_run_tests.yml\\n@@ -34,8 +34,8 @@ jobs:\\n done\\n \\n build-docker-containers:\\n- needs: check-for-setup\\n- if: (github.event_name == 'push') && (needs.check-for-setup.outputs.changed == '1')\\n+ needs: check-for-source\\n+ if: (github.event_name == 'push') && (needs.check-for-source.outputs.changed == '1')\\n uses: ./.github/workflows/build_docker_images.yml\\n secrets: inherit\\n \\n\",\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/727'},\n", - " 1069149049: {'diff': 'diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex bf57d11a4..d1905d1c0 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -844,8 +844,8 @@ def launch_command(args):\\n args.mixed_precision = defaults.mixed_precision\\n else:\\n if args.num_processes is None:\\n- warned.append(\"\\\\t`--num_processes` was set to a value of `1`\")\\n- args.num_processes = 1\\n+ args.num_processes = torch.cuda.device_count() if args.multi_gpu else 1\\n+ warned.append(\"\\\\t`--num_processes` was set to a value of `{args.num_processes}`\")\\n if args.num_machines is None:\\n warned.append(\"\\\\t`--num_machines` was set to a value of `1`\")\\n args.num_machines = 1\\n@@ -854,14 +854,6 @@ def launch_command(args):\\n args.mixed_precision = \"no\"\\n if not hasattr(args, \"use_cpu\"):\\n args.use_cpu = args.cpu\\n- if args.multi_gpu and args.num_processes == 1:\\n- args.num_processes = torch.cuda.device_count()\\n- if not any(\"--num_processes\" in warn for warn in warned):\\n- warned.append(f\"\\\\t`--num_processes` was set to `{args.num_processes}`\")\\n- else:\\n- for i, warn in enumerate(warned):\\n- if \"--num_processes\" in warn:\\n- warned[i] = warn.replace(\"`1`\", f\"`{args.num_processes}`\")\\n \\n if args.num_cpu_threads_per_process is None:\\n local_size = get_int_from_env(\\n',\n", - " 'code_comments': [{'body': '```suggestion\\r\\n args.num_processes = torch.cuda.device_count() if args.multi_gpu else 1\\r\\n warned.append(\"\\\\t`--num_processes` was set to a value of `{args.num_processes}`\")\\r\\n```',\n", - " 'diff_hunk': '@@ -845,7 +845,7 @@ def launch_command(args):\\n else:\\n if args.num_processes is None:\\n warned.append(\"\\\\t`--num_processes` was set to a value of `1`\")',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\n```',\n", - " 'diff_hunk': '@@ -845,7 +845,7 @@ def launch_command(args):\\n else:\\n if args.num_processes is None:\\n warned.append(\"\\\\t`--num_processes` was 
set to a value of `1`\")\\n- args.num_processes = 1\\n+ args.num_processes = torch.cuda.device_count() if args.multi_gpu else 1',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/726'},\n", - " 1069096305: {'diff': 'diff --git a/.github/workflows/build-docker-images-release.yml b/.github/workflows/build-docker-images-release.yml\\nnew file mode 100644\\nindex 000000000..654259f87\\n--- /dev/null\\n+++ b/.github/workflows/build-docker-images-release.yml\\n@@ -0,0 +1,64 @@\\n+name: Build Docker images (releases)\\n+\\n+on:\\n+ workflow_dispatch:\\n+ release:\\n+ types: [published]\\n+\\n+concurrency:\\n+ group: docker-image-builds\\n+ cancel-in-progress: false\\n+\\n+jobs:\\n+ get-version:\\n+ runs-on: ubuntu-latest\\n+ outputs:\\n+ version: ${{ steps.step1.outputs.version }}\\n+ steps:\\n+ - uses: actions/checkout@v3\\n+ - id: step1\\n+ run: echo \"::set-output name=version::$(python setup.py --version)\"\\n+\\n+ version-cpu:\\n+ name: \"Latest Accelerate CPU [version]\"\\n+ runs-on: ubuntu-latest\\n+ needs: get-version\\n+ steps:\\n+ - name: Set up Docker Buildx\\n+ uses: docker/setup-buildx-action@v1\\n+ - name: Check out code\\n+ uses: actions/checkout@v2\\n+ - name: Login to DockerHub\\n+ uses: docker/login-action@v1\\n+ with:\\n+ username: ${{ secrets.DOCKERHUB_USERNAME }}\\n+ password: ${{ secrets.DOCKERHUB_PASSWORD }}\\n+\\n+ - name: Build and Push CPU\\n+ uses: docker/build-push-action@v2\\n+ with:\\n+ context: ./docker/accelerate-cpu\\n+ push: true\\n+ tags: huggingface/accelerate-cpu:${{needs.get-version.outputs.version}}\\n+\\n+ version-cuda:\\n+ name: \"Latest Accelerate GPU [version]\"\\n+ runs-on: ubuntu-latest\\n+ needs: get-version\\n+ steps:\\n+ - name: Set up Docker Buildx\\n+ uses: docker/setup-buildx-action@v1\\n+ - name: Check out code\\n+ uses: actions/checkout@v2\\n+ - name: Login to DockerHub\\n+ uses: docker/login-action@v1\\n+ with:\\n+ username: ${{ secrets.DOCKERHUB_USERNAME }}\\n+ password: ${{ secrets.DOCKERHUB_PASSWORD }}\\n+\\n+ - name: Build and Push GPU\\n+ uses: docker/build-push-action@v2\\n+ with:\\n+ context: ./docker/accelerate-gpu\\n+ push: true\\n+ tags: huggingface/accelerate-gpu:${{needs.get-version.outputs.version}}\\n\\\\ No newline at end of file\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': \"> FFT, should we just merge these two release workflows together and check if 'dev' is in the version to automatically tag the `latest` tag? Or maybe just publish the prerelease tags? WDYT?\\r\\n\\r\\nI guess you are talking about the existing `build_docker_images.yml` here. I don't have particular opinion about if merging these 2 workflow files or not.\",\n", - " 'from_author': False},\n", - " {'body': \"@ydshieh by other folks, not for testing. 
We've had people use the latest images already, so made sense to also do releases \\r\\n\\r\\nOkay, will think on this and do a follow up most likely with the merge \",\n", - " 'from_author': True}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/725'},\n", - " 1068881556: {'diff': 'diff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\\nindex 1eb8ff79d..fac1b50e2 100644\\n--- a/src/accelerate/data_loader.py\\n+++ b/src/accelerate/data_loader.py\\n@@ -347,7 +347,6 @@ def __iter__(self):\\n try:\\n current_batch = next(dataloader_iter)\\n except StopIteration:\\n- self.gradient_state._iterate_samples_seen(find_batch_size(current_batch))\\n yield\\n while True:\\n try:\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/724'},\n", - " 1067475067: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex ab4b48660..2b6558944 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -619,6 +619,12 @@ def prepare(self, *args, device_placement=None):\\n device_placement (`List[bool]`, *optional*):\\n Used to customize whether automatic device placement should be performed for each object passed. Needs\\n to be a list of the same length as `args`.\\n+\\n+ \\n+\\n+ You don\\'t need to prepare a model if you only use it for inference without any kind of mixed precision\\n+\\n+ \\n \"\"\"\\n if device_placement is None:\\n device_placement = [None for _ in args]\\n@@ -699,7 +705,8 @@ def prepare_model(self, model: torch.nn.Module, device_placement=None):\\n \\n Args:\\n model (`torch.nn.Module`):\\n- A PyTorch model to prepare\\n+ A PyTorch model to prepare. You don\\'t need to prepare a model if it is used only for inference without\\n+ any kind of mixed precision\\n device_placement (`bool`, *optional*):\\n Whether or not to place the model on the proper device. 
Will default to `self.device_placement`.\\n \"\"\"\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/722'},\n", - " 1065394516: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex cd74b68e2..ab4b48660 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -547,15 +547,15 @@ def print(self, *args, **kwargs):\\n if self.is_local_main_process:\\n print(*args, **kwargs)\\n \\n- def _prepare_one(self, obj, first_pass=False):\\n+ def _prepare_one(self, obj, first_pass=False, device_placement=None):\\n # First pass of preparation: DataLoader, model, optimizer\\n if first_pass:\\n if isinstance(obj, torch.utils.data.DataLoader):\\n- return self.prepare_data_loader(obj)\\n+ return self.prepare_data_loader(obj, device_placement=device_placement)\\n elif isinstance(obj, torch.nn.Module):\\n- return self.prepare_model(obj)\\n+ return self.prepare_model(obj, device_placement=device_placement)\\n elif isinstance(obj, torch.optim.Optimizer):\\n- optimizer = self.prepare_optimizer(obj)\\n+ optimizer = self.prepare_optimizer(obj, device_placement=device_placement)\\n return optimizer\\n # Second pass of preparation: LR scheduler (which need the full list of optimizers)\\n elif isinstance(obj, torch.optim.lr_scheduler._LRScheduler):\\n@@ -602,17 +602,33 @@ def _prepare_fsdp(self, *args):\\n self._optimizers = optimizers\\n return tuple(result)\\n \\n- def prepare(self, *args):\\n+ def prepare(self, *args, device_placement=None):\\n \"\"\"\\n Prepare all objects passed in `args` for distributed training and mixed precision, then return them in the same\\n order.\\n \\n- Accepts the following type of objects:\\n+ Args:\\n+ *args (list of objects):\\n+ Any of the following type of objects:\\n+\\n+ - `torch.utils.data.DataLoader`: PyTorch Dataloader\\n+ - `torch.nn.Module`: PyTorch Module\\n+ - `torch.optim.Optimizer`: PyTorch Optimizer\\n+ - `torch.optim.lr_scheduler._LRScheduler`: PyTorch LR Scheduler\\n \\n- - `torch.utils.data.DataLoader`: PyTorch Dataloader\\n- - `torch.nn.Module`: PyTorch Module\\n- - `torch.optim.Optimizer`: PyTorch Optimizer\\n+ device_placement (`List[bool]`, *optional*):\\n+ Used to customize whether automatic device placement should be performed for each object passed. 
Needs\\n+ to be a list of the same length as `args`.\\n \"\"\"\\n+ if device_placement is None:\\n+ device_placement = [None for _ in args]\\n+ elif self.distributed_type == DistributedType.DEEPSPEED:\\n+ raise ValueError(\"You can\\'t customize device placements with DeepSpeed.\")\\n+ elif len(device_placement) != len(args):\\n+ raise ValueError(\\n+ f\"`device_placement` should be a list with {len(args)} elements (the number of objects passed).\"\\n+ )\\n+\\n if self.distributed_type == DistributedType.FSDP:\\n model_count = 0\\n optimizer_present = False\\n@@ -656,8 +672,10 @@ def prepare(self, *args):\\n if self.distributed_type == DistributedType.DEEPSPEED:\\n result = self._prepare_deepspeed(*args)\\n else:\\n- result = tuple(self._prepare_one(obj, first_pass=True) for obj in args)\\n- result = tuple(self._prepare_one(obj) for obj in result)\\n+ result = tuple(\\n+ self._prepare_one(obj, first_pass=True, device_placement=d) for obj, d in zip(args, device_placement)\\n+ )\\n+ result = tuple(self._prepare_one(obj, device_placement=d) for obj, d in zip(result, device_placement))\\n \\n if tpu_should_fix_optimizer:\\n # 2. grabbing new model parameters\\n@@ -674,7 +692,7 @@ def prepare(self, *args):\\n \\n return result if len(result) > 1 else result[0]\\n \\n- def prepare_model(self, model: torch.nn.Module):\\n+ def prepare_model(self, model: torch.nn.Module, device_placement=None):\\n \"\"\"\\n Prepares a PyTorch model for training in any distributed setup. It is recommended to use\\n [`Accelerator.prepare`] instead.\\n@@ -682,9 +700,13 @@ def prepare_model(self, model: torch.nn.Module):\\n Args:\\n model (`torch.nn.Module`):\\n A PyTorch model to prepare\\n+ device_placement (`bool`, *optional*):\\n+ Whether or not to place the model on the proper device. Will default to `self.device_placement`.\\n \"\"\"\\n+ if device_placement is None:\\n+ device_placement = self.device_placement and self.distributed_type != DistributedType.FSDP\\n self._models.append(model)\\n- if self.device_placement and self.distributed_type != DistributedType.FSDP:\\n+ if device_placement:\\n model = model.to(self.device)\\n if self.distributed_type == DistributedType.MULTI_GPU:\\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\\n@@ -894,7 +916,7 @@ def _prepare_deepspeed(self, *args):\\n )\\n return tuple(result)\\n \\n- def prepare_data_loader(self, data_loader: torch.utils.data.DataLoader):\\n+ def prepare_data_loader(self, data_loader: torch.utils.data.DataLoader, device_placement=None):\\n \"\"\"\\n Prepares a PyTorch DataLoader for training in any distributed setup. It is recommended to use\\n [`Accelerator.prepare`] instead.\\n@@ -902,19 +924,24 @@ def prepare_data_loader(self, data_loader: torch.utils.data.DataLoader):\\n Args:\\n data_loader (`torch.utils.data.DataLoader`):\\n A vanilla PyTorch DataLoader to prepare\\n+ device_placement (`bool`, *optional*):\\n+ Whether or not to place the batches on the proper device in the prepared dataloader. 
Will default to\\n+ `self.device_placement`.\\n \"\"\"\\n+ if device_placement is None:\\n+ device_placement = self.device_placement if self.distributed_type != DistributedType.TPU else False\\n return prepare_data_loader(\\n data_loader,\\n self.device,\\n num_processes=self.num_processes,\\n process_index=self.process_index,\\n split_batches=self.split_batches,\\n- put_on_device=self.device_placement if self.distributed_type != DistributedType.TPU else False,\\n+ put_on_device=device_placement,\\n rng_types=self.rng_types.copy(),\\n dispatch_batches=self.dispatch_batches,\\n )\\n \\n- def prepare_optimizer(self, optimizer: torch.optim.Optimizer):\\n+ def prepare_optimizer(self, optimizer: torch.optim.Optimizer, device_placement=None):\\n \"\"\"\\n Prepares a PyTorch Optimizer for training in any distributed setup. It is recommended to use\\n [`Accelerator.prepare`] instead.\\n@@ -922,8 +949,12 @@ def prepare_optimizer(self, optimizer: torch.optim.Optimizer):\\n Args:\\n optimizer (`torch.optim.Optimizer`):\\n A vanilla PyTorch optimizer to prepare\\n+ device_placement (`bool`, *optional*):\\n+ Whether or not to place the optimizer on the proper device. Will default to `self.device_placement`.\\n \"\"\"\\n- optimizer = AcceleratedOptimizer(optimizer, device_placement=self.device_placement, scaler=self.scaler)\\n+ if device_placement is None:\\n+ device_placement = self.device_placement\\n+ optimizer = AcceleratedOptimizer(optimizer, device_placement=device_placement, scaler=self.scaler)\\n self._optimizers.append(optimizer)\\n return optimizer\\n \\n',\n", - " 'code_comments': [{'body': 'Perhaps we can be a bit more specific here:\\r\\n\\r\\n```suggestion\\r\\n device_placement (`List[bool]`, *optional*):\\r\\n Used to customize whether automatic device placement should be performed for each object passed. Needs to be a list of the same length as\\r\\n `args`.\\r\\n```',\n", - " 'diff_hunk': '@@ -602,17 +602,32 @@ def _prepare_fsdp(self, *args):\\n self._optimizers = optimizers\\n return tuple(result)\\n \\n- def prepare(self, *args):\\n+ def prepare(self, *args, device_placement=None):\\n \"\"\"\\n Prepare all objects passed in `args` for distributed training and mixed precision, then return them in the same\\n order.\\n \\n- Accepts the following type of objects:\\n+ Args:\\n+ *args:\\n+ Any of the following type of objects:\\n+\\n+ - `torch.utils.data.DataLoader`: PyTorch Dataloader\\n+ - `torch.nn.Module`: PyTorch Module\\n+ - `torch.optim.Optimizer`: PyTorch Optimizer\\n+ - `torch.optim.lr_scheduler._LRScheduler`: PyTorch LR Scheduler\\n+ device_placement (`List[bool]`, *optional*):\\n+ Used to customize the device placement for each object passed. Needs to be a list of the same length as\\n+ `args`.',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/716'},\n", - " 1065343886: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex bb58ca57f..cd74b68e2 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -674,7 +674,15 @@ def prepare(self, *args):\\n \\n return result if len(result) > 1 else result[0]\\n \\n- def prepare_model(self, model):\\n+ def prepare_model(self, model: torch.nn.Module):\\n+ \"\"\"\\n+ Prepares a PyTorch model for training in any distributed setup. 
It is recommended to use\\n+ [`Accelerator.prepare`] instead.\\n+\\n+ Args:\\n+ model (`torch.nn.Module`):\\n+ A PyTorch model to prepare\\n+ \"\"\"\\n self._models.append(model)\\n if self.device_placement and self.distributed_type != DistributedType.FSDP:\\n model = model.to(self.device)\\n@@ -886,7 +894,15 @@ def _prepare_deepspeed(self, *args):\\n )\\n return tuple(result)\\n \\n- def prepare_data_loader(self, data_loader):\\n+ def prepare_data_loader(self, data_loader: torch.utils.data.DataLoader):\\n+ \"\"\"\\n+ Prepares a PyTorch DataLoader for training in any distributed setup. It is recommended to use\\n+ [`Accelerator.prepare`] instead.\\n+\\n+ Args:\\n+ data_loader (`torch.utils.data.DataLoader`):\\n+ A vanilla PyTorch DataLoader to prepare\\n+ \"\"\"\\n return prepare_data_loader(\\n data_loader,\\n self.device,\\n@@ -898,12 +914,28 @@ def prepare_data_loader(self, data_loader):\\n dispatch_batches=self.dispatch_batches,\\n )\\n \\n- def prepare_optimizer(self, optimizer):\\n+ def prepare_optimizer(self, optimizer: torch.optim.Optimizer):\\n+ \"\"\"\\n+ Prepares a PyTorch Optimizer for training in any distributed setup. It is recommended to use\\n+ [`Accelerator.prepare`] instead.\\n+\\n+ Args:\\n+ optimizer (`torch.optim.Optimizer`):\\n+ A vanilla PyTorch optimizer to prepare\\n+ \"\"\"\\n optimizer = AcceleratedOptimizer(optimizer, device_placement=self.device_placement, scaler=self.scaler)\\n self._optimizers.append(optimizer)\\n return optimizer\\n \\n- def prepare_scheduler(self, scheduler):\\n+ def prepare_scheduler(self, scheduler: torch.optim.lr_scheduler._LRScheduler):\\n+ \"\"\"\\n+ Prepares a PyTorch Scheduler for training in any distributed setup. It is recommended to use\\n+ [`Accelerator.prepare`] instead.\\n+\\n+ Args:\\n+ scheduler (`torch.optim.lr_scheduler._LRScheduler`):\\n+ A vanilla PyTorch scheduler to prepare\\n+ \"\"\"\\n # We try to find the optimizer associated with `scheduler`, the default is the full list.\\n optimizer = self._optimizers\\n for opt in self._optimizers:\\n@@ -1133,7 +1165,7 @@ def init_trackers(self, project_name: str, config: Optional[dict] = None, init_k\\n Optional starting configuration to be logged.\\n init_kwargs (`dict`, *optional*):\\n A nested dictionary of kwargs to be passed to a specific tracker\\'s `__init__` function. Should be\\n- formatted like this:\\n+ formatted like so:\\n ```python\\n {\"wandb\": {\"tags\": [\"tag_a\", \"tag_b\"]}}\\n ```\\n@@ -1182,7 +1214,7 @@ def log(self, values: dict, step: Optional[int] = None, log_kwargs: Optional[dic\\n The run step. If included, the log will be affiliated with this step.\\n log_kwargs (`dict`, *optional*):\\n A nested dictionary of kwargs to be passed to a specific tracker\\'s `log` function. Should be formatted\\n- like this:\\n+ like so:\\n ```python\\n {\"wandb\": {\"tags\": [\"tag_a\", \"tag_b\"]}}\\n ```\\n@@ -1193,7 +1225,8 @@ def log(self, values: dict, step: Optional[int] = None, log_kwargs: Optional[dic\\n @on_main_process\\n def end_training(self):\\n \"\"\"\\n- Runs any special end training behaviors, such as stopping trackers on the main process only.\\n+ Runs any special end training behaviors, such as stopping trackers on the main process only. 
Should always be\\n+ called at the end of your script if using experiment tracking.\\n \"\"\"\\n for tracker in self.trackers:\\n tracker.finish()\\n@@ -1383,6 +1416,15 @@ def _get_devices(self, *args):\\n return (model_device, optimizer_device)\\n \\n def get_state_dict(self, model, unwrap=True):\\n+ \"\"\"\\n+ Returns the state dictionary of a model sent through [`Accelerator.prepare`] in full precision\\n+\\n+ Args:\\n+ model (`torch.nn.Module`):\\n+ A PyTorch model sent through [`Accelerator.prepare`]\\n+ unwrap (`bool`, *optional*, defaults to True):\\n+ Whether to return the original underlying state_dict of `model` or to return the wrapped state_dict\\n+ \"\"\"\\n is_zero_3 = False\\n if self.distributed_type == DistributedType.DEEPSPEED:\\n is_zero_3 = self.deepspeed_config[\"zero_optimization\"][\"stage\"] == 3\\n',\n", - " 'code_comments': [{'body': 'I think the other types are all subclasses of `Module`\\r\\n```suggestion\\r\\n model (`torch.nn.Module`):\\r\\n```',\n", - " 'diff_hunk': '@@ -1383,6 +1416,15 @@ def _get_devices(self, *args):\\n return (model_device, optimizer_device)\\n \\n def get_state_dict(self, model, unwrap=True):\\n+ \"\"\"\\n+ Returns the state dictionary of a model sent through [`Accelerator.prepare`] in full precision\\n+\\n+ Args:\\n+ model (`Module`, `DistributedDataParallel`, `FullyShardedDataParallel`, `MpModelWrapper`):',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/715'},\n", - " 1063333029: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex 552c8392c..bb58ca57f 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -125,8 +125,8 @@ class Accelerator:\\n - `\"tensorboard\"`\\n - `\"wandb\"`\\n - `\"comet_ml\"`\\n- If `\"all`\" is selected, will pick up all available trackers in the environment and intialize them. Can also\\n- accept implementations of `GeneralTracker` for custom trackers, and can be combined with `\"all\"`.\\n+ If `\"all\"` is selected, will pick up all available trackers in the environment and initialize them. Can\\n+ also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `\"all\"`.\\n logging_dir (`str`, `os.PathLike`, *optional*):\\n A path to a directory for storing logs of locally-compatible loggers.\\n dispatch_batches (`bool`, *optional*):\\n@@ -643,7 +643,7 @@ def prepare(self, *args):\\n \"The model and the optimizer parameters are not on the same device, which probably means you \"\\n \"created an optimizer around your model **before** putting on the device. 
Make sure the line \"\\n \"model.to(device) is before the optimizer creation in your script or remove it entirely and use \"\\n- \"the flag default value for `devicement_placement` in your `Accelerator` to let it handle that \"\\n+ \"the flag default value for `device_placement` in your `Accelerator` to let it handle that \"\\n \"part for you.\"\\n )\\n \\ndiff --git a/src/accelerate/big_modeling.py b/src/accelerate/big_modeling.py\\nindex 3c67b6ee5..9d57c144f 100644\\n--- a/src/accelerate/big_modeling.py\\n+++ b/src/accelerate/big_modeling.py\\n@@ -44,7 +44,7 @@ def init_empty_weights(include_buffers: bool = False):\\n \\n Example:\\n \\n- ```pyton\\n+ ```python\\n import torch.nn as nn\\n from accelerate import init_empty_weights\\n \\n@@ -324,7 +324,7 @@ def load_checkpoint_and_dispatch(\\n dtype (`str` or `torch.dtype`, *optional*):\\n If provided, the weights will be converted to that type when loaded.\\n offload_state_dict (`bool`, *optional*):\\n- If `True`, will temporarily offload the CPU state dict on the hard drive to avoig getting out of CPU RAM if\\n+ If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if\\n the weight of the CPU state dict + the biggest shard does not fit. Will default to `True` if the device map\\n picked contains `\"disk\"` values.\\n preload_module_classes (`List[str]`, *optional*):\\ndiff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\\nindex ee6d194f1..a438ab45b 100644\\n--- a/src/accelerate/tracking.py\\n+++ b/src/accelerate/tracking.py\\n@@ -424,8 +424,8 @@ def filter_trackers(\\n - `\"tensorboard\"`\\n - `\"wandb\"`\\n - `\"comet_ml\"`\\n- If `\"all`\" is selected, will pick up all available trackers in the environment and intialize them. Can also\\n- accept implementations of `GeneralTracker` for custom trackers, and can be combined with `\"all\"`.\\n+ If `\"all\"` is selected, will pick up all available trackers in the environment and initialize them. Can\\n+ also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `\"all\"`.\\n logging_dir (`str`, `os.PathLike`, *optional*):\\n A path to a directory for storing logs of locally-compatible loggers.\\n \"\"\"\\ndiff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\\nindex 467a757b4..0b57ae6d8 100644\\n--- a/src/accelerate/utils/modeling.py\\n+++ b/src/accelerate/utils/modeling.py\\n@@ -617,7 +617,7 @@ def load_checkpoint_in_model(\\n dtype (`str` or `torch.dtype`, *optional*):\\n If provided, the weights will be converted to that type when loaded.\\n offload_state_dict (`bool`, *optional*, defaults to `False`):\\n- If `True`, will temporarily offload the CPU state dict on the hard drive to avoig getting out of CPU RAM if\\n+ If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if\\n the weight of the CPU state dict + the biggest shard does not fit.\\n \"\"\"\\n if offload_folder is None and device_map is not None and \"disk\" in device_map.values():\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'Hey @sgugger ,\\r\\n\\r\\nTrying to run your Makefile has proven really tough for me:\\r\\n1. `python` doesn\\'t exist on ubuntu. Setup a symlink for `python` -> `python3` ...\\r\\n\\r\\n2. 
`python3.9` running `make style` tries to change 30 files\\r\\n`30 files reformatted, 55 files left unchanged.`\\r\\n\\r\\n3. Tried to match the runner and did a `docker run ... 3.7.... -v .:/accelerate` ...\\r\\nThis still gave different results than the CI runner:\\r\\n`5 files reformatted, 80 files left unchanged.`\\r\\n\\r\\nI\\'m unable to actually get my black/isort to match what the test unhappy about.\\r\\n\\r\\nIt appears the test is failing for a line length style issue from a file that wasn\\'t actually touched in this commit... \\r\\n\\r\\nIt feels like it should have tripped on #711 -- not here. \\r\\n\\r\\n```\\r\\npython utils/style_doc.py src/accelerate docs/source --max_len 119 --check_only\\r\\nTraceback (most recent call last):\\r\\n File \"utils/style_doc.py\", line 556, in \\r\\n main(*args.files, max_len=args.max_len, check_only=args.check_only)\\r\\n File \"utils/style_doc.py\", line 544, in main\\r\\n raise ValueError(f\"{len(changed)} files should be restyled!\")\\r\\n```\\r\\n\\r\\nCan we just merge this in, and fix that style next time you lint?\\r\\n\\r\\nOr could you jump in on this one and help run the `make style` to get that file to format correctly based on your black/isort spec?\\r\\n\\r\\nIs there a docker image or something setup to run your `Makefile` in a reproducible environment? I\\'m probably missing something...',\n", - " 'from_author': True},\n", - " {'body': \"Hey @sgugger --\\r\\n\\r\\nThis is jumping the shark, but I do have a (arguably) nice pre-commit template that handles autoflake, isort, black and works well cross-platform.\\r\\n\\r\\nHere's a screenshot. If you want to re-do the makefile, I can start a new ticket and switch it to pre-commit.\\r\\n\\r\\n![image](https://user-images.githubusercontent.com/523300/191733560-c07f973d-5c9c-4087-a8b0-6303ba028169.png)\\r\\n\\r\\nThis also checks for conventional commits format(optional).\\r\\n\\r\\nThis would be a bit bigger in scope and more opinionated than what I thought I was starting with some readability PR's :)\\r\\n\\r\\nLet me know if this is something that's of interest to swapping out for the huggingface team... if so, we can throw it in a new issue.\\r\\n\",\n", - " 'from_author': True},\n", - " {'body': 'Hi @ryanrussell, I think the issue here is the wrong black/flake8 may be installed? Can you try running `make style` again after doing `pip install -e .[quality]` (or `pip install accelerate[quality]` depending on how you have it installed)?',\n", - " 'from_author': False},\n", - " {'body': \"Hi @ryanrussell. 90% on the team is on Ubuntu and have no problem with running `make style` without doing any aliases. Not sure why you don't have the `python` command available after installing Python.\\r\\n\\r\\nThen as @muellerzr said, you need to have the pinned versions we use for the libraries formatting the code, which is why we recommend using a virtual environment. I've pushed a style commit to move forward with this PR.\\r\\n\\r\\nI don't want to add pre-commit hooks as I prefer having the ability to do the styling in a separate commit when I'm afraid it might break something.\",\n", - " 'from_author': False},\n", - " {'body': \"Hey @sgugger -- thanks for helping fix this. Sounds like it's a user error on my side.\\r\\n\\r\\nI'll wait to send any more PR's until I setup a venv with correct version pins and can replicate the expected linting from the CI...\\r\\n\\r\\nJust found the discord, so joining there... 
if I run into any issues, much easier to iterate there than through comments :+1: \",\n", - " 'from_author': True},\n", - " {'body': 'Failures are unrelated (something in the dev of evaluate messed up) so merging!',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/713'},\n", - " 1063301116: {'diff': 'diff --git a/src/accelerate/hooks.py b/src/accelerate/hooks.py\\nindex 5c04b8621..493444daa 100644\\n--- a/src/accelerate/hooks.py\\n+++ b/src/accelerate/hooks.py\\n@@ -71,7 +71,7 @@ def post_forward(self, module, output):\\n \\n def detach_hook(self, module):\\n \"\"\"\\n- To be executed when the hook is deached from a module.\\n+ To be executed when the hook is detached from a module.\\n \\n Args:\\n module (`torch.nn.Module`): The module detached from this hook.\\n@@ -182,7 +182,7 @@ class AlignDevicesHook(ModelHook):\\n Args:\\n execution_device (`torch.device`, *optional*):\\n The device on which inputs and model weights should be placed before the forward pass.\\n- offload (`bool`, *optional*, defauts to `False`):\\n+ offload (`bool`, *optional*, defaults to `False`):\\n Whether or not the weights should be offloaded after the forward pass.\\n io_same_device (`bool`, *optional*, defaults to `False`):\\n Whether or not the output should be placed on the same device as the input was.\\n@@ -319,7 +319,7 @@ def attach_align_device_hook(\\n The module where we want to attach the hooks.\\n execution_device (`torch.device`, *optional*):\\n The device on which inputs and model weights should be placed before the forward pass.\\n- offload (`bool`, *optional*, defauts to `False`):\\n+ offload (`bool`, *optional*, defaults to `False`):\\n Whether or not the weights should be offloaded after the forward pass.\\n weights_map (`Mapping[str, torch.Tensor]`, *optional*):\\n When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.\\n@@ -402,7 +402,7 @@ def attach_align_device_hook_on_blocks(\\n execution_device (`torch.device` or `Dict[str, torch.device]`, *optional*):\\n The device on which inputs and model weights should be placed before the forward pass. It can be one device\\n for the whole module, or a dictionary mapping module name to device.\\n- offload (`bool`, *optional*, defauts to `False`):\\n+ offload (`bool`, *optional*, defaults to `False`):\\n Whether or not the weights should be offloaded after the forward pass. 
It can be one boolean for the whole\\n module, or a dictionary mapping module name to boolean.\\n weights_map (`Mapping[str, torch.Tensor]`, *optional*):\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/712'},\n", - " 1063297381: {'diff': 'diff --git a/utils/style_doc.py b/utils/style_doc.py\\nindex 2a325805f..0422ebeb4 100644\\n--- a/utils/style_doc.py\\n+++ b/utils/style_doc.py\\n@@ -28,7 +28,7 @@\\n # Regexes\\n # Re pattern that catches list introduction (with potential indent)\\n _re_list = re.compile(r\"^(\\\\s*-\\\\s+|\\\\s*\\\\*\\\\s+|\\\\s*\\\\d+\\\\.\\\\s+)\")\\n-# Re pattern that catches code block introduction (with potentinal indent)\\n+# Re pattern that catches code block introduction (with potential indent)\\n _re_code = re.compile(r\"^(\\\\s*)```(.*)$\")\\n # Re pattern that catches rst args blocks of the form `Parameters:`.\\n _re_args = re.compile(\"^\\\\s*(Args?|Arguments?|Params?|Parameters?):\\\\s*$\")\\n@@ -62,7 +62,7 @@ def parse_code_example(code_lines):\\n \\n Args:\\n code_lines (`List[str]`): The code lines to parse.\\n- max_len (`int`): The maximum lengh per line.\\n+ max_len (`int`): The maximum length per line.\\n \\n Returns:\\n (List[`str`], List[`str`]): The list of code samples and the list of outputs.\\n@@ -109,7 +109,7 @@ def format_code_example(code: str, max_len: int, in_docstring: bool = False):\\n \\n Args:\\n code (`str`): The code example to format.\\n- max_len (`int`): The maximum lengh per line.\\n+ max_len (`int`): The maximum length per line.\\n in_docstring (`bool`, *optional*, defaults to `False`): Whether or not the code example is inside a docstring.\\n \\n Returns:\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/711'},\n", - " 1063295373: {'diff': 'diff --git a/tests/test_tracking.py b/tests/test_tracking.py\\nindex 917ee329d..5e26eb9b5 100644\\n--- a/tests/test_tracking.py\\n+++ b/tests/test_tracking.py\\n@@ -85,12 +85,12 @@ def setUp(self):\\n self.add_mocks(mock.patch.dict(os.environ, {\"WANDB_DIR\": self.tmpdir}))\\n \\n @staticmethod\\n- def get_value_from_log(key: str, log: str, key_occurance: int = 0):\\n+ def get_value_from_log(key: str, log: str, key_occurrence: int = 0):\\n \"\"\"\\n Parses wandb log for `key` and returns the value.\\n- If parsing through multiple calls to .log, pass in a `key_occurance`\\n+ If parsing through multiple calls to .log, pass in a `key_occurrence`\\n \"\"\"\\n- res = re.findall(rf\"(?<={key} )[^\\\\s]+\", log)[key_occurance]\\n+ res = re.findall(rf\"(?<={key} )[^\\\\s]+\", log)[key_occurrence]\\n if \\'\"\\' in res:\\n return re.findall(r\\'\"([^\"]*)\"\\', res)[0]\\n else:\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/710'},\n", - " 1063269081: {'diff': 'diff --git a/examples/by_feature/cross_validation.py b/examples/by_feature/cross_validation.py\\nindex 707c93c4c..87f804ce6 100644\\n--- a/examples/by_feature/cross_validation.py\\n+++ b/examples/by_feature/cross_validation.py\\n@@ -58,7 +58,7 @@\\n 
EVAL_BATCH_SIZE = 32\\n \\n # New Code #\\n-# We need a different `get_dataloaders` function that will build dataloaders by indexs\\n+# We need a different `get_dataloaders` function that will build dataloaders by index\\n \\n \\n def get_fold_dataloaders(\\n@@ -71,9 +71,9 @@ def get_fold_dataloaders(\\n accelerator (`Accelerator`):\\n The main `Accelerator` object\\n train_idxs (list of `int`):\\n- The split indicies for the training dataset\\n+ The split indices for the training dataset\\n valid_idxs (list of `int`):\\n- The split indicies for the validation dataset\\n+ The split indices for the validation dataset\\n batch_size (`int`):\\n The size of the minibatch. Default is 16\\n \"\"\"\\ndiff --git a/examples/by_feature/deepspeed_with_config_support.py b/examples/by_feature/deepspeed_with_config_support.py\\nindex 89234a911..36ace84b9 100755\\n--- a/examples/by_feature/deepspeed_with_config_support.py\\n+++ b/examples/by_feature/deepspeed_with_config_support.py\\n@@ -525,7 +525,7 @@ def group_texts(examples):\\n },\\n ]\\n # New Code #\\n- # Creates Dummy Optimizer if `optimizer` was spcified in the config file else creates Adam Optimizer\\n+ # Creates Dummy Optimizer if `optimizer` was specified in the config file else creates Adam Optimizer\\n optimizer_cls = (\\n torch.optim.AdamW\\n if accelerator.state.deepspeed_plugin is None\\n@@ -554,7 +554,7 @@ def group_texts(examples):\\n args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\\n \\n # New Code #\\n- # Creates Dummy Scheduler if `scheduler` was spcified in the config file else creates `args.lr_scheduler_type` Scheduler\\n+ # Creates Dummy Scheduler if `scheduler` was specified in the config file else creates `args.lr_scheduler_type` Scheduler\\n if (\\n accelerator.state.deepspeed_plugin is None\\n or \"scheduler\" not in accelerator.state.deepspeed_plugin.deepspeed_config\\ndiff --git a/examples/by_feature/memory.py b/examples/by_feature/memory.py\\nindex 9f0d55c69..82dc34b8a 100644\\n--- a/examples/by_feature/memory.py\\n+++ b/examples/by_feature/memory.py\\n@@ -29,7 +29,7 @@\\n ########################################################################\\n # This is a fully working simple example to use Accelerate,\\n # specifically showcasing how to ensure out-of-memory errors never\\n-# iterrupt training, and builds off the `nlp_example.py` script.\\n+# interrupt training, and builds off the `nlp_example.py` script.\\n #\\n # This example trains a Bert base model on GLUE MRPC\\n # in any of the following settings (with the same script):\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/709'},\n", - " 1062918270: {'diff': 'diff --git a/docs/source/usage_guides/sagemaker.mdx b/docs/source/usage_guides/sagemaker.mdx\\nindex 6195783e9..0afe52e29 100644\\n--- a/docs/source/usage_guides/sagemaker.mdx\\n+++ b/docs/source/usage_guides/sagemaker.mdx\\n@@ -129,7 +129,26 @@ You can find your model data at: s3://your-bucket/accelerate-sagemaker-1-2021-04\\n \\n ### Distributed Training: Data Parallelism\\n \\n-*currently in development, will be supported soon.*\\n+Set up the accelerate config by running `accelerate config` and answer the SageMaker questions and set it up.\\n+To use SageMaker DDP, select it when asked \\n+`What is the distributed mode? 
([0] No distributed training, [1] data parallelism):`.\\n+Example config below:\\n+```yaml\\n+base_job_name: accelerate-sagemaker-1\\n+compute_environment: AMAZON_SAGEMAKER\\n+distributed_type: DATA_PARALLEL\\n+ec2_instance_type: ml.p3.16xlarge\\n+iam_role_name: xxxxx\\n+image_uri: null\\n+mixed_precision: fp16\\n+num_machines: 1\\n+profile: xxxxx\\n+py_version: py38\\n+pytorch_version: 1.10.2\\n+region: us-east-1\\n+transformers_version: 4.17.0\\n+use_cpu: false\\n+```\\n \\n ### Distributed Training: Model Parallelism\\n \\ndiff --git a/examples/README.md b/examples/README.md\\nindex c4f050b3f..6a3c0a11a 100644\\n--- a/examples/README.md\\n+++ b/examples/README.md\\n@@ -187,6 +187,9 @@ To run it in each of these various modes, use the following commands:\\n ### Simple vision example (GANs)\\n \\n - [huggan project](https://github.com/huggingface/community-events/tree/main/huggan)\\n+\\n+### Using AWS SageMaker integration\\n+- [Examples showcasing AWS SageMaker integration of 🤗 Accelerate.](https://github.com/pacman100/accelerate-aws-sagemaker)\\n \\n ## Finer Examples\\n \\ndiff --git a/src/accelerate/commands/config/sagemaker.py b/src/accelerate/commands/config/sagemaker.py\\nindex 786881edf..b3a45c9e4 100644\\n--- a/src/accelerate/commands/config/sagemaker.py\\n+++ b/src/accelerate/commands/config/sagemaker.py\\n@@ -157,7 +157,7 @@ def get_sagemaker_input():\\n )\\n \\n distributed_type = _ask_field(\\n- \"Which type of machine are you using? ([0] No distributed training, [1] data parallelism): \",\\n+ \"What is the distributed mode? ([0] No distributed training, [1] data parallelism): \",\\n _convert_sagemaker_distributed_mode,\\n error_message=\"Please enter 0 or 1\",\\n )\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/708'},\n", - " 1061646486: {'diff': 'diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml\\nindex cb0a7ee42..277a81df9 100644\\n--- a/.github/workflows/nightly.yml\\n+++ b/.github/workflows/nightly.yml\\n@@ -7,6 +7,7 @@ on:\\n \\n env:\\n RUN_SLOW: \"yes\"\\n+ IS_GITHUB_CI: \"1\"\\n \\n jobs:\\n run_all_tests_single_gpu:\\n@@ -27,6 +28,7 @@ jobs:\\n git config --global --add safe.directory \\'*\\'\\n git fetch && git checkout ${{ github.sha }} \\n pip install -e . --no-deps\\n+ pip install pytest-reportlog\\n \\n - name: Run test on GPUs\\n run: |\\n@@ -37,6 +39,11 @@ jobs:\\n source activate accelerate\\n pip uninstall comet_ml -y\\n make test_examples\\n+ \\n+ - name: Generate Report\\n+ if: always()\\n+ run: |\\n+ python utils/log_reports.py >> $GITHUB_STEP_SUMMARY\\n \\n run_all_tests_multi_gpu:\\n runs-on: [self-hosted, docker-gpu, multi-gpu]\\n@@ -56,6 +63,7 @@ jobs:\\n git config --global --add safe.directory \\'*\\'\\n git fetch && git checkout ${{ github.sha }}\\n pip install -e . 
--no-deps\\n+ pip install pytest-reportlog\\n \\n - name: Run core and big modeling tests on GPUs\\n run: |\\n@@ -72,4 +80,9 @@ jobs:\\n run: |\\n source activate accelerate\\n pip uninstall comet_ml -y\\n- make test_examples\\n\\\\ No newline at end of file\\n+ make test_examples\\n+\\n+ - name: Generate Report\\n+ if: always()\\n+ run: |\\n+ python utils/log_reports.py >> $GITHUB_STEP_SUMMARY\\n\\\\ No newline at end of file\\ndiff --git a/.github/workflows/on-merge.yml b/.github/workflows/on-merge.yml\\nindex 028b99d9a..a794cd72b 100644\\n--- a/.github/workflows/on-merge.yml\\n+++ b/.github/workflows/on-merge.yml\\n@@ -6,6 +6,7 @@ on:\\n \\n env:\\n TESTING_MOCKED_DATALOADERS: \"1\"\\n+ IS_GITHUB_CI: \"1\"\\n \\n jobs:\\n run_all_tests_single_gpu:\\n@@ -26,6 +27,7 @@ jobs:\\n git config --global --add safe.directory \\'*\\'\\n git fetch && git checkout ${{ github.sha }}\\n pip install -e .[testing,test_trackers]\\n+ pip install pytest-reportlog\\n \\n - name: Run test on GPUs\\n run: |\\n@@ -37,6 +39,11 @@ jobs:\\n pip uninstall comet_ml -y\\n make test_examples\\n \\n+ - name: Generate Report\\n+ if: always()\\n+ run: |\\n+ python utils/log_reports.py >> $GITHUB_STEP_SUMMARY\\n+\\n run_all_tests_multi_gpu:\\n runs-on: [self-hosted, docker-gpu, multi-gpu]\\n container:\\n@@ -53,6 +60,7 @@ jobs:\\n git config --global --add safe.directory \\'*\\'\\n git fetch && git checkout ${{ github.sha }}\\n pip install -e .[testing,test_trackers]\\n+ pip install pytest-reportlog\\n \\n - name: Run test on GPUs\\n run: |\\n@@ -63,4 +71,9 @@ jobs:\\n run: |\\n source activate accelerate\\n pip uninstall comet_ml -y\\n- make test_examples\\n\\\\ No newline at end of file\\n+ make test_examples\\n+\\n+ - name: Generate Report\\n+ if: always()\\n+ run: |\\n+ python utils/log_reports.py >> $GITHUB_STEP_SUMMARY\\n\\\\ No newline at end of file\\ndiff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\\nindex 2cf8e34a7..d39d36951 100644\\n--- a/.github/workflows/test.yml\\n+++ b/.github/workflows/test.yml\\n@@ -5,7 +5,7 @@ on: [pull_request]\\n env:\\n HF_HOME: ~/hf_cache\\n TESTING_MOCKED_DATALOADERS: \"1\"\\n- CI: 1\\n+ IS_GITHUB_CI: \"1\"\\n \\n jobs:\\n run-tests:\\ndiff --git a/Makefile b/Makefile\\nindex 0c9ad0ca9..52c594ae7 100644\\n--- a/Makefile\\n+++ b/Makefile\\n@@ -25,40 +25,40 @@ style:\\n \\t\\n # Run tests for the library\\n test:\\n-\\tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py $(if $(CI),--report-log \\'all.log\\',)\\n+\\tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log \\'all.log\\',)\\n \\n test_big_modeling:\\n-\\tpython -m pytest -s -v ./tests/test_big_modeling.py $(if $(CI),--report-log \\'big_modeling.log\\',)\\n+\\tpython -m pytest -s -v ./tests/test_big_modeling.py $(if $(IS_GITHUB_CI),--report-log \\'big_modeling.log\\',)\\n \\n test_core:\\n \\tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py --ignore=./tests/deepspeed --ignore=./tests/test_big_modeling.py \\\\\\n-\\t--ignore=./tests/fsdp $(if $(CI),--report-log \\'core.log\\',)\\n+\\t--ignore=./tests/fsdp $(if $(IS_GITHUB_CI),--report-log \\'core.log\\',)\\n \\n test_deepspeed:\\n-\\tpython -m pytest -s -v ./tests/deepspeed $(if $(CI),--report-log \\'deepspeed.log\\',)\\n+\\tpython -m pytest -s -v ./tests/deepspeed $(if $(IS_GITHUB_CI),--report-log \\'deepspeed.log\\',)\\n \\n test_fsdp:\\n-\\tpython -m pytest -s -v ./tests/fsdp $(if $(CI),--report-log \\'fsdp.log\\',)\\n+\\tpython -m pytest -s -v ./tests/fsdp $(if 
$(IS_GITHUB_CI),--report-log \\'fsdp.log\\',)\\n \\n test_examples:\\n-\\tpython -m pytest -s -v ./tests/test_examples.py $(if $(CI),--report-log \\'examples.log\\',)\\n+\\tpython -m pytest -s -v ./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log \\'examples.log\\',)\\n \\n # Broken down example tests for the CI runners\\n test_integrations:\\n-\\tpython -m pytest -s -v ./tests/deepspeed ./tests/fsdp $(if $(CI),--report-log \\'integrations.log\\',)\\n+\\tpython -m pytest -s -v ./tests/deepspeed ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log \\'integrations.log\\',)\\n \\n test_example_differences:\\n-\\tpython -m pytest -s -v ./tests/test_examples.py::ExampleDifferenceTests $(if $(CI),--report-log \\'example_diff.log\\',)\\n+\\tpython -m pytest -s -v ./tests/test_examples.py::ExampleDifferenceTests $(if $(IS_GITHUB_CI),--report-log \\'example_diff.log\\',)\\n \\n test_checkpoint_epoch:\\n-\\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_epoch\" $(if $(CI),--report-log \\'checkpoint_epoch.log\\',)\\n+\\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_epoch\" $(if $(IS_GITHUB_CI),--report-log \\'checkpoint_epoch.log\\',)\\n \\n test_checkpoint_step:\\n-\\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_step\" $(if $(CI),--report-log \\'checkpoint_step.log\\',)\\n+\\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_step\" $(if $(IS_GITHUB_CI),--report-log \\'checkpoint_step.log\\',)\\n \\n # Same as test but used to install only the base dependencies\\n test_prod:\\n \\t$(MAKE) test_core\\n \\n test_rest:\\n-\\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"not by_step and not by_epoch\" $(if $(CI),--report-log \\'rest.log\\',)\\n+\\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"not by_step and not by_epoch\" $(if $(IS_GITHUB_CI),--report-log \\'rest.log\\',)\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/704'},\n", - " 1060736877: {'diff': 'diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\\nindex 3fec513d2..2cf8e34a7 100644\\n--- a/.github/workflows/test.yml\\n+++ b/.github/workflows/test.yml\\n@@ -5,11 +5,13 @@ on: [pull_request]\\n env:\\n HF_HOME: ~/hf_cache\\n TESTING_MOCKED_DATALOADERS: \"1\"\\n+ CI: 1\\n \\n jobs:\\n run-tests:\\n runs-on: ubuntu-latest\\n strategy:\\n+ fail-fast: false\\n matrix:\\n pytorch-version: [\\n latest,\\n@@ -48,7 +50,13 @@ jobs:\\n if [[ ${{ matrix.test-kind }} != test_prod ]]; then pip install -e .[testing,test_trackers]; fi\\n if [[ ${{ matrix.test-kind }} = test_rest ]]; then pip uninstall comet_ml -y; fi\\n if [[ ${{ matrix.pytorch-version }} = minimum ]]; then pip install torch==1.6.0; fi\\n+ pip install pytest-reportlog\\n \\n - name: Run Tests\\n run: |\\n- make ${{ matrix.test-kind }}\\n\\\\ No newline at end of file\\n+ make ${{ matrix.test-kind }}\\n+\\n+ - name: Generate Report\\n+ if: always()\\n+ run: |\\n+ python utils/log_reports.py >> $GITHUB_STEP_SUMMARY\\n\\\\ No newline at end of file\\ndiff --git a/Makefile b/Makefile\\nindex c1c79d6e8..0c9ad0ca9 100644\\n--- a/Makefile\\n+++ b/Makefile\\n@@ -25,39 +25,40 @@ style:\\n \\t\\n # Run tests for the library\\n test:\\n-\\tpython -m pytest -s -v ./tests/ 
--ignore=./tests/test_examples.py\\n+\\tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py $(if $(CI),--report-log \\'all.log\\',)\\n \\n test_big_modeling:\\n-\\tpython -m pytest -s -v ./tests/test_big_modeling.py\\n+\\tpython -m pytest -s -v ./tests/test_big_modeling.py $(if $(CI),--report-log \\'big_modeling.log\\',)\\n \\n test_core:\\n \\tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py --ignore=./tests/deepspeed --ignore=./tests/test_big_modeling.py \\\\\\n-\\t--ignore=./tests/fsdp\\n+\\t--ignore=./tests/fsdp $(if $(CI),--report-log \\'core.log\\',)\\n \\n test_deepspeed:\\n-\\tpython -m pytest -s -v ./tests/deepspeed\\n+\\tpython -m pytest -s -v ./tests/deepspeed $(if $(CI),--report-log \\'deepspeed.log\\',)\\n \\n test_fsdp:\\n-\\tpython -m pytest -s -v ./tests/fsdp\\n+\\tpython -m pytest -s -v ./tests/fsdp $(if $(CI),--report-log \\'fsdp.log\\',)\\n \\n test_examples:\\n-\\tpython -m pytest -s -v ./tests/test_examples.py\\n+\\tpython -m pytest -s -v ./tests/test_examples.py $(if $(CI),--report-log \\'examples.log\\',)\\n \\n # Broken down example tests for the CI runners\\n test_integrations:\\n-\\tpython -m pytest -s -v ./tests/deepspeed ./tests/fsdp\\n+\\tpython -m pytest -s -v ./tests/deepspeed ./tests/fsdp $(if $(CI),--report-log \\'integrations.log\\',)\\n+\\n test_example_differences:\\n-\\tpython -m pytest -s -v ./tests/test_examples.py::ExampleDifferenceTests\\n+\\tpython -m pytest -s -v ./tests/test_examples.py::ExampleDifferenceTests $(if $(CI),--report-log \\'example_diff.log\\',)\\n \\n test_checkpoint_epoch:\\n-\\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_epoch\"\\n+\\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_epoch\" $(if $(CI),--report-log \\'checkpoint_epoch.log\\',)\\n \\n test_checkpoint_step:\\n-\\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_step\"\\n+\\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_step\" $(if $(CI),--report-log \\'checkpoint_step.log\\',)\\n \\n # Same as test but used to install only the base dependencies\\n test_prod:\\n \\t$(MAKE) test_core\\n \\n test_rest:\\n-\\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"not by_step and not by_epoch\"\\n+\\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"not by_step and not by_epoch\" $(if $(CI),--report-log \\'rest.log\\',)\\ndiff --git a/utils/log_reports.py b/utils/log_reports.py\\nnew file mode 100644\\nindex 000000000..f701f08c2\\n--- /dev/null\\n+++ b/utils/log_reports.py\\n@@ -0,0 +1,34 @@\\n+import json\\n+from pathlib import Path \\n+import subprocess\\n+\\n+failed = []\\n+passed = []\\n+\\n+group_info = []\\n+\\n+total_num_failed = 0\\n+for log in Path().glob(\"*.log\"):\\n+ section_num_failed = 0\\n+ with open(log, \"r\") as f:\\n+ for line in f:\\n+ line = json.loads(line)\\n+ if line.get(\"nodeid\", \"\") != \"\":\\n+ test = line[\"nodeid\"]\\n+ if line.get(\"duration\", None) is not None:\\n+ duration = f\\'{line[\"duration\"]:.4f}\\'\\n+ if line.get(\"outcome\", \"\") == \"failed\":\\n+ section_num_failed += 1\\n+ failed.append([test, duration])\\n+ else:\\n+ passed.append([test, duration])\\n+ group_info.append([str(log), section_num_failed])\\n+\\n+if len(failed) > 0:\\n+ result = \"## Failed Tests:\\\\n\"\\n+ failed_table = \\'| Test Location | Test Class | Test Name |\\\\n|---|---|---|\\\\n| \\'\\n+ for test in failed:\\n+ failed_table += \\' | 
\\'.join(test[0].split(\"::\"))\\n+ failed_table += \" |\"\\n+ result += failed_table\\n+ print(result)\\n\\\\ No newline at end of file\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/703'},\n", - " 1058450492: {'diff': 'diff --git a/docs/source/usage_guides/memory.mdx b/docs/source/usage_guides/memory.mdx\\nindex 9c5674977..8841cd584 100644\\n--- a/docs/source/usage_guides/memory.mdx\\n+++ b/docs/source/usage_guides/memory.mdx\\n@@ -48,4 +48,4 @@ def training_function(args):\\n + inner_training_loop()\\n ```\\n \\n-To find out more, check the documentation [here](package_reference/utilities#accelerate.find_executable_batch_size)\\n\\\\ No newline at end of file\\n+To find out more, check the documentation [here](../package_reference/utilities#accelerate.find_executable_batch_size).\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': 'Hello @tomaarsen, looks good, thanks for your contribution 😄',\n", - " 'from_author': False},\n", - " {'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'Thanks a lot!', 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/702'},\n", - " 1056024034: {'diff': 'diff --git a/src/accelerate/big_modeling.py b/src/accelerate/big_modeling.py\\nindex 311111ad4..0750248de 100644\\n--- a/src/accelerate/big_modeling.py\\n+++ b/src/accelerate/big_modeling.py\\n@@ -75,15 +75,35 @@ def register_empty_buffer(module, name, buffer):\\n if buffer is not None:\\n module._buffers[name] = module._buffers[name].to(torch.device(\"meta\"))\\n \\n+ # Patch tensor creation\\n+ if include_buffers:\\n+ tensor_constructors_to_patch = {\\n+ torch_function_name: getattr(torch, torch_function_name)\\n+ for torch_function_name in [\"empty\", \"zeros\", \"ones\", \"full\"]\\n+ }\\n+ else:\\n+ tensor_constructors_to_patch = {}\\n+\\n+ def patch_tensor_constructor(fn):\\n+ def wrapper(*args, **kwargs):\\n+ kwargs[\"device\"] = torch.device(\"meta\")\\n+ return fn(*args, **kwargs)\\n+\\n+ return wrapper\\n+\\n try:\\n nn.Module.register_parameter = register_empty_parameter\\n if include_buffers:\\n nn.Module.register_buffer = register_empty_buffer\\n+ for torch_function_name in tensor_constructors_to_patch.keys():\\n+ setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name)))\\n yield\\n finally:\\n nn.Module.register_parameter = old_register_parameter\\n if include_buffers:\\n nn.Module.register_buffer = old_register_buffer\\n+ for torch_function_name, old_torch_function in tensor_constructors_to_patch.items():\\n+ setattr(torch, torch_function_name, old_torch_function)\\n \\n \\n def cpu_offload(\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'I don\\'t really understand why this is needed: while load a pretrained model inside the context manager and complain it takes time?\\r\\n```py\\r\\nfrom accelerate import init_empty_weights\\r\\nfrom transformers import AutoConfig, AutoModel\\r\\n\\r\\nconfig = AutoConfig.from_pretrained(\"gpt2\")\\r\\n\\r\\nwith init_empty_weights():\\r\\n model = AutoModel.from_config(config)\\r\\n```\\r\\nis way faster than 6s',\n", - " 
'from_author': False},\n", - " {'body': \"Hum it's doing it 10 times, so 0.6 sec per load. Benchmarking your solution displays the same order of magnitude: `14.567796499999996` (I think the difference is that the use of config doesn't require to read from disk the checkpoint anymore)\\r\\n\\r\\nThough your workaround removed the need to override `torch.load` and especially the hack that I introduced.\\r\\n```\\r\\nUsing config + this PR: 4.1660165419999995\\r\\n```\",\n", - " 'from_author': True},\n", - " {'body': \"Also I'm not sure why this wasn't detected, but the test `tests/test_big_modeling.py::BigModelingTester::test_init_empty_weights` pass on my MAC contrary to CI ... Moving back to draft as I need to figure this one out.\",\n", - " 'from_author': True},\n", - " {'body': \"Actually if we activate this feature only when `include_buffers=True` (I'm guessing the assumption is that all pytorch tensors are expected to be `meta`) then that should be fine.\",\n", - " 'from_author': True}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/699'},\n", - " 1055539565: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex b17ea387d..552c8392c 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -747,6 +747,12 @@ def _prepare_deepspeed(self, *args):\\n batch_size_per_device = deepspeed_plugin.deepspeed_config[\"train_micro_batch_size_per_gpu\"]\\n result = [obj for obj in args]\\n \\n+ if self.gradient_accumulation_steps != deepspeed_plugin.deepspeed_config[\"gradient_accumulation_steps\"]:\\n+ logger.info(\\n+ f\"Updating DeepSpeed\\'s gradient accumulation steps to {self.gradient_accumulation_steps} from \"\\n+ f\"{deepspeed_plugin.deepspeed_config[\\'gradient_accumulation_steps\\']}.\"\\n+ )\\n+ deepspeed_plugin.deepspeed_config[\"gradient_accumulation_steps\"] = self.gradient_accumulation_steps\\n config_kwargs = {\\n \"train_micro_batch_size_per_gpu\": batch_size_per_device,\\n \"train_batch_size\": batch_size_per_device\\n',\n", - " 'code_comments': [{'body': 'No need for a warning here, I think a logging info is sufficient (the fact that args to the `Accelerator` supercede the config is a common behavior, you can do it to force CPU or a given mixed precision already).',\n", - " 'diff_hunk': '@@ -747,6 +747,12 @@ def _prepare_deepspeed(self, *args):\\n batch_size_per_device = deepspeed_plugin.deepspeed_config[\"train_micro_batch_size_per_gpu\"]\\n result = [obj for obj in args]\\n \\n+ if self.gradient_accumulation_steps != deepspeed_plugin.deepspeed_config[\"gradient_accumulation_steps\"]:\\n+ warnings.warn(',\n", - " 'from_author': False},\n", - " {'body': 'Done.',\n", - " 'diff_hunk': '@@ -747,6 +747,12 @@ def _prepare_deepspeed(self, *args):\\n batch_size_per_device = deepspeed_plugin.deepspeed_config[\"train_micro_batch_size_per_gpu\"]\\n result = [obj for obj in args]\\n \\n+ if self.gradient_accumulation_steps != deepspeed_plugin.deepspeed_config[\"gradient_accumulation_steps\"]:\\n+ warnings.warn(',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/698'},\n", - " 1053039468: {'diff': 'diff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\\nindex 84c2ebd67..1eb8ff79d 100644\\n--- a/src/accelerate/data_loader.py\\n+++ b/src/accelerate/data_loader.py\\n@@ -364,10 +364,11 
@@ def __iter__(self):\\n \\n @property\\n def total_batch_size(self):\\n+ batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler\\n return (\\n- self.batch_sampler.batch_size\\n- if self.batch_sampler.split_batches\\n- else (self.batch_sampler.batch_size * self.batch_sampler.num_processes)\\n+ batch_sampler.batch_size\\n+ if batch_sampler.split_batches\\n+ else (batch_sampler.batch_size * batch_sampler.num_processes)\\n )\\n \\n @property\\n@@ -639,14 +640,17 @@ def prepare_data_loader(\\n )\\n else:\\n # New batch sampler for the current process.\\n- if hasattr(dataloader.sampler, \"generator\"):\\n- if dataloader.sampler.generator is None:\\n- dataloader.sampler.generator = torch.Generator()\\n- generator = dataloader.sampler.generator\\n- generator.manual_seed(int(torch.empty((), dtype=torch.int64).random_().item()))\\n- elif getattr(dataloader.batch_sampler, \"generator\", None) is not None:\\n- generator = dataloader.batch_sampler.generator\\n sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)\\n+ if sampler_is_batch_sampler:\\n+ sampler = dataloader.sampler.sampler\\n+ else:\\n+ sampler = dataloader.batch_sampler.sampler\\n+ if hasattr(sampler, \"generator\"):\\n+ if sampler.generator is None:\\n+ sampler.generator = torch.Generator()\\n+ generator = sampler.generator\\n+ generator.manual_seed(int(torch.empty((), dtype=torch.int64).random_().item()))\\n+\\n batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler\\n new_batch_sampler = BatchSamplerShard(\\n batch_sampler,\\n@@ -692,6 +696,7 @@ def prepare_data_loader(\\n new_dataset,\\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\\n sampler=new_batch_sampler,\\n+ batch_size=getattr(dataloader, \"batch_size\", _PYTORCH_DATALOADER_KWARGS[\"batch_size\"]),\\n rng_types=rng_types,\\n generator=generator,\\n **kwargs,\\n',\n", - " 'code_comments': [{'body': 'I think we should use a instance check on the sampler here, as in my tests, there was a `batch_sampler` when using a `BatchSampler` for `sampler`:\\r\\n```py\\r\\nbatch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler\\r\\n```',\n", - " 'diff_hunk': '@@ -364,10 +364,14 @@ def __iter__(self):\\n \\n @property\\n def total_batch_size(self):\\n+ if self.batch_sampler is None:\\n+ batch_sampler = self.sampler\\n+ else:\\n+ batch_sampler = self.batch_sampler',\n", - " 'from_author': False},\n", - " {'body': 'Would reuse the same `batch_size` as for the original `datalaoder` here.',\n", - " 'diff_hunk': '@@ -692,6 +699,7 @@ def prepare_data_loader(\\n new_dataset,\\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\\n sampler=new_batch_sampler,\\n+ batch_size=None,',\n", - " 'from_author': False},\n", - " {'body': \"This is necessary else `batch_sampler` would get created with `batch_size=1` when it shouldn't do that as the `sampler` is already an instance of `batch_sampler`. 
This line here: https://github.com/pytorch/pytorch/blob/master/torch/utils/data/dataloader.py#L357\\r\\n\\r\\nThis leads to the issue `1.a` specified in the PR description\\r\\n\",\n", - " 'diff_hunk': '@@ -692,6 +699,7 @@ def prepare_data_loader(\\n new_dataset,\\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\\n sampler=new_batch_sampler,\\n+ batch_size=None,',\n", - " 'from_author': True},\n", - " {'body': \"But it's not our place to fix users mistakes and they might have wanted to use `batch_size=1` with this for some reason instead of `batch_size=None`.\",\n", - " 'diff_hunk': '@@ -692,6 +699,7 @@ def prepare_data_loader(\\n new_dataset,\\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\\n sampler=new_batch_sampler,\\n+ batch_size=None,',\n", - " 'from_author': False},\n", - " {'body': \"Here, the Dataloader default is batch_size=1 and not the one fixed by user. The one fixed by user is used which is part of sampler which is an instance of BatchSampler. When sampler is an object of BatchSampler, batch_sampler shouldn't be created and for that batch_size to dataloader has to be None so that dataloader can use sampler directly to create batches. \",\n", - " 'diff_hunk': '@@ -692,6 +699,7 @@ def prepare_data_loader(\\n new_dataset,\\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\\n sampler=new_batch_sampler,\\n+ batch_size=None,',\n", - " 'from_author': True},\n", - " {'body': 'Yes, `batch_size` should be None, but in the examples of the issue linked above, the user did not do that. We should honor the same argument as was passed to the original dataloader which is saved [here](https://github.com/pytorch/pytorch/blob/d2d145a40001d1e1f815a144160bd0b8d0f60ea0/torch/utils/data/dataloader.py#L361).',\n", - " 'diff_hunk': '@@ -692,6 +699,7 @@ def prepare_data_loader(\\n new_dataset,\\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\\n sampler=new_batch_sampler,\\n+ batch_size=None,',\n", - " 'from_author': False},\n", - " {'body': \"Got it. However, even if the user passes `None` to dataloader, it isn't being passed to the final `DataLoaderShard` as batch_size is part of `ignore_kwargs` and that leads to default value of 1 being used. https://github.com/huggingface/accelerate/blob/main/src/accelerate/data_loader.py#L659\",\n", - " 'diff_hunk': '@@ -692,6 +699,7 @@ def prepare_data_loader(\\n new_dataset,\\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\\n sampler=new_batch_sampler,\\n+ batch_size=None,',\n", - " 'from_author': True},\n", - " {'body': 'Yes when there is a `batch_sampler`, because PyTorch raises an error and does not let us do that. 
But here we only pass a sampler ;-)',\n", - " 'diff_hunk': '@@ -692,6 +699,7 @@ def prepare_data_loader(\\n new_dataset,\\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\\n sampler=new_batch_sampler,\\n+ batch_size=None,',\n", - " 'from_author': False},\n", - " {'body': 'Done.',\n", - " 'diff_hunk': '@@ -364,10 +364,14 @@ def __iter__(self):\\n \\n @property\\n def total_batch_size(self):\\n+ if self.batch_sampler is None:\\n+ batch_sampler = self.sampler\\n+ else:\\n+ batch_sampler = self.batch_sampler',\n", - " 'from_author': True},\n", - " {'body': 'Done.',\n", - " 'diff_hunk': '@@ -692,6 +699,7 @@ def prepare_data_loader(\\n new_dataset,\\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\\n sampler=new_batch_sampler,\\n+ batch_size=None,',\n", - " 'from_author': True},\n", - " {'body': 'If `(batch_size != 1 and batch_size is not None) and sampler_is_batch_sampler`, is there any \"default\" way to shard batches without explicit instruction from user? I think at least a warning should be raised.',\n", - " 'diff_hunk': '@@ -692,6 +696,7 @@ def prepare_data_loader(\\n new_dataset,\\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\\n sampler=new_batch_sampler,\\n+ batch_size=getattr(dataloader, \"batch_size\", _PYTORCH_DATALOADER_KWARGS[\"batch_size\"]),',\n", - " 'from_author': False},\n", - " {'body': 'BTW, `hasattr(dataloader, \"batch_size\")` is always `True` for `DataLoader`.',\n", - " 'diff_hunk': '@@ -692,6 +696,7 @@ def prepare_data_loader(\\n new_dataset,\\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\\n sampler=new_batch_sampler,\\n+ batch_size=getattr(dataloader, \"batch_size\", _PYTORCH_DATALOADER_KWARGS[\"batch_size\"]),',\n", - " 'from_author': False},\n", - " {'body': '> BTW, `hasattr(dataloader, \"batch_size\")` is always `True` for `DataLoader`.\\r\\n\\r\\nresolving this in the other PR. wrt warning, current logic follows PyTorch API and as such warning isn\\'t warranted.',\n", - " 'diff_hunk': '@@ -692,6 +696,7 @@ def prepare_data_loader(\\n new_dataset,\\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\\n sampler=new_batch_sampler,\\n+ batch_size=getattr(dataloader, \"batch_size\", _PYTORCH_DATALOADER_KWARGS[\"batch_size\"]),',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/696'},\n", - " 1052863881: {'diff': 'diff --git a/docs/source/usage_guides/mps.mdx b/docs/source/usage_guides/mps.mdx\\nindex 3648bdce8..7a7f8a628 100644\\n--- a/docs/source/usage_guides/mps.mdx\\n+++ b/docs/source/usage_guides/mps.mdx\\n@@ -19,7 +19,7 @@ This will map computational graphs and primitives on the MPS Graph framework and\\n For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\\n and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\\n \\n-### Benefits of Training and Inference using Apple M1 Chips\\n+### Benefits of Training and Inference using Apple Silicon Chips\\n \\n 1. Enables users to train larger networks or batch sizes locally\\n 2. 
Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \\ndiff --git a/src/accelerate/launchers.py b/src/accelerate/launchers.py\\nindex dcdead956..0387109b6 100644\\n--- a/src/accelerate/launchers.py\\n+++ b/src/accelerate/launchers.py\\n@@ -121,12 +121,17 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mix\\n start_processes(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\\n \\n else:\\n- # No need for a distributed launch otherwise as it\\'s either CPU or one GPU.\\n- if torch.cuda.is_available():\\n+ # No need for a distributed launch otherwise as it\\'s either CPU, GPU or MPS.\\n+ use_mps_device = \"false\"\\n+ if torch.backends.mps.is_available():\\n+ print(\"Launching training on MPS.\")\\n+ use_mps_device = \"true\"\\n+ elif torch.cuda.is_available():\\n print(\"Launching training on one GPU.\")\\n else:\\n print(\"Launching training on CPU.\")\\n- function(*args)\\n+ with patch_environment(use_mps_device=use_mps_device):\\n+ function(*args)\\n \\n \\n def debug_launcher(function, args=(), num_processes=2):\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_695). All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/695'},\n", - " 1051471370: {'diff': 'diff --git a/src/accelerate/utils/random.py b/src/accelerate/utils/random.py\\nindex 51b7c3719..e95ed03bf 100644\\n--- a/src/accelerate/utils/random.py\\n+++ b/src/accelerate/utils/random.py\\n@@ -64,7 +64,7 @@ def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optiona\\n state = AcceleratorState()\\n if state.distributed_type == DistributedType.TPU:\\n rng_state = xm.mesh_reduce(\"random_seed\", rng_state, lambda x: x[0])\\n- elif state.distributed_type in [DistributedType.DEEPSPEED, DistributedType.MULTI_GPU]:\\n+ elif state.distributed_type in [DistributedType.DEEPSPEED, DistributedType.MULTI_GPU, DistributedType.FSDP]:\\n rng_state = rng_state.to(state.device)\\n torch.distributed.broadcast(rng_state, 0)\\n rng_state = rng_state.cpu()\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/688'},\n", - " 1050421126: {'diff': 'diff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\\nindex fb5393d34..84c2ebd67 100644\\n--- a/src/accelerate/data_loader.py\\n+++ b/src/accelerate/data_loader.py\\n@@ -622,6 +622,7 @@ def prepare_data_loader(\\n new_dataset = dataloader.dataset\\n # Iterable dataset doesn\\'t like batch_sampler, but data_loader creates a default one for it\\n new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None\\n+ sampler_is_batch_sampler = False\\n generator = getattr(dataloader, \"generator\", None)\\n # No change if no multiprocess\\n if num_processes != 1 and not dispatch_batches:\\n@@ -645,8 +646,10 @@ def prepare_data_loader(\\n generator.manual_seed(int(torch.empty((), dtype=torch.int64).random_().item()))\\n elif getattr(dataloader.batch_sampler, \"generator\", None) is not None:\\n generator = dataloader.batch_sampler.generator\\n+ sampler_is_batch_sampler 
= isinstance(dataloader.sampler, BatchSampler)\\n+ batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler\\n new_batch_sampler = BatchSamplerShard(\\n- dataloader.batch_sampler,\\n+ batch_sampler,\\n num_processes=num_processes,\\n process_index=process_index,\\n split_batches=split_batches,\\n@@ -684,6 +687,15 @@ def prepare_data_loader(\\n _drop_last=dataloader.drop_last,\\n **kwargs,\\n )\\n+ elif sampler_is_batch_sampler:\\n+ dataloader = DataLoaderShard(\\n+ new_dataset,\\n+ device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\\n+ sampler=new_batch_sampler,\\n+ rng_types=rng_types,\\n+ generator=generator,\\n+ **kwargs,\\n+ )\\n else:\\n dataloader = DataLoaderShard(\\n new_dataset,\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/687'},\n", - " 1048925301: {'diff': 'diff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\\nindex cbe39efa3..fb5393d34 100644\\n--- a/src/accelerate/data_loader.py\\n+++ b/src/accelerate/data_loader.py\\n@@ -462,10 +462,6 @@ def _fetch_batches(self, iterator):\\n else:\\n batch_info = [None, True]\\n broadcast_object_list(batch_info)\\n- if batch_info[1]:\\n- return batch, batch_info\\n- else:\\n- return batch, batch_info\\n return batch, batch_info\\n \\n def __iter__(self):\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/686'},\n", - " 1047756035: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex 989774148..b17ea387d 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -745,6 +745,7 @@ def _prepare_deepspeed(self, *args):\\n )\\n else:\\n batch_size_per_device = deepspeed_plugin.deepspeed_config[\"train_micro_batch_size_per_gpu\"]\\n+ result = [obj for obj in args]\\n \\n config_kwargs = {\\n \"train_micro_batch_size_per_gpu\": batch_size_per_device,\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/683'},\n", - " 1047708869: {'diff': 'diff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\\nindex 9236ee310..cbe39efa3 100644\\n--- a/src/accelerate/data_loader.py\\n+++ b/src/accelerate/data_loader.py\\n@@ -463,10 +463,10 @@ def _fetch_batches(self, iterator):\\n batch_info = [None, True]\\n broadcast_object_list(batch_info)\\n if batch_info[1]:\\n- return batch, batch_info, True\\n+ return batch, batch_info\\n else:\\n- return batch, batch_info, True\\n- return batch, batch_info, False\\n+ return batch, batch_info\\n+ return batch, batch_info\\n \\n def __iter__(self):\\n self.gradient_state._set_end_of_dataloader(False)\\n@@ -477,11 +477,10 @@ def __iter__(self):\\n stop_iteration = False\\n self._stop_iteration = False\\n first_batch = None\\n- next_batch, next_batch_info, next_skip = self._fetch_batches(main_iterator)\\n+ next_batch, next_batch_info = self._fetch_batches(main_iterator)\\n while not stop_iteration:\\n- batch, batch_info, skip = next_batch, next_batch_info, 
next_skip\\n- if skip:\\n- continue\\n+ batch, batch_info = next_batch, next_batch_info\\n+\\n if self.state.process_index != 0:\\n # Initialize tensors on other processes than process 0.\\n batch = initialize_tensors(batch_info[0])\\n@@ -500,7 +499,7 @@ def __iter__(self):\\n if not stop_iteration:\\n # We may still be at the end of the dataloader without knowing it yet: if there is nothing left in\\n # the dataloader since the number of batches is a round multiple of the number of processes.\\n- next_batch, next_batch_info, next_skip = self._fetch_batches(main_iterator)\\n+ next_batch, next_batch_info = self._fetch_batches(main_iterator)\\n # next_batch_info[0] is None when there are no more batches, otherwise we still need to process them.\\n if self._stop_iteration and next_batch_info[0] is None:\\n stop_iteration = True\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': \"Hello, for some reason I wasn't notified by github about review request, looks good. I was thinking if the below part of `_fetch_batches` could be simplified \\r\\n\\r\\n```diff\\r\\ndef _fetch_batches(self, iterator):\\r\\n batches, batch = None, None\\r\\n # On process 0, we gather the batch to dispatch.\\r\\n if self.state.process_index == 0:\\r\\n try:\\r\\n if self.split_batches:\\r\\n # One batch of the main iterator is dispatched and split.\\r\\n batch = next(iterator)\\r\\n else:\\r\\n # num_processes batches of the main iterator are concatenated then dispatched and split.\\r\\n # We add the batches one by one so we have the remainder available when drop_last=False.\\r\\n batches = []\\r\\n for _ in range(self.state.num_processes):\\r\\n batches.append(next(iterator))\\r\\n batch = concatenate(batches, dim=0)\\r\\n # In both cases, we need to get the structure of the batch that we will broadcast on other\\r\\n # processes to initialize the tensors with the right shape.\\r\\n # data_structure, stop_iteration\\r\\n batch_info = [get_data_structure(batch), False]\\r\\n except StopIteration:\\r\\n batch_info = [None, True]\\r\\n else:\\r\\n batch_info = [None, self._stop_iteration]\\r\\n # This is inplace, so after this instruction, every process has the same `batch_info` as process 0.\\r\\n broadcast_object_list(batch_info)\\r\\n self._stop_iteration = batch_info[1]\\r\\n if self._stop_iteration:\\r\\n # If drop_last is False and split_batches is False, we may have a remainder to take care of.\\r\\n if not self.split_batches and not self._drop_last:\\r\\n if self.state.process_index == 0 and len(batches) > 0:\\r\\n batch = concatenate(batches, dim=0)\\r\\n batch_info = [get_data_structure(batch), False]\\r\\n else:\\r\\n batch_info = [None, True]\\r\\n broadcast_object_list(batch_info)\\r\\n- if batch_info[1]:\\r\\n- return batch, batch_info\\r\\n- else:\\r\\n- return batch, batch_info\\r\\n return batch, batch_info\\r\\n```\",\n", - " 'from_author': False},\n", - " {'body': \"Yes, you're right thanks! 
Will make a follow-up PR.\",\n", - " 'from_author': True}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/682'},\n", - " 1046590816: {'diff': 'diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex c7f235843..fad42792d 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -270,6 +270,25 @@ def launch_command_parser(subparsers=None):\\n default=None,\\n help=\"The port to use to communicate with the machine of rank 0.\",\\n )\\n+ # Rendezvous related arguments\\n+ parser.add_argument(\\n+ \"--rdzv_conf\",\\n+ type=str,\\n+ default=\"\",\\n+ help=\"Additional rendezvous configuration (=,=,...).\",\\n+ )\\n+ parser.add_argument(\\n+ \"--max_restarts\",\\n+ type=int,\\n+ default=0,\\n+ help=\"Maximum number of worker group restarts before failing.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--monitor_interval\",\\n+ type=float,\\n+ default=5,\\n+ help=\"Interval, in seconds, to monitor the state of workers.\",\\n+ )\\n parser.add_argument(\\n \"--main_training_function\",\\n type=str,\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': '> It looks like those new fields are not used anywhere afterwards?\\r\\n\\r\\nHello, it is being used via below lines:\\r\\n\\r\\nhttps://github.com/huggingface/accelerate/blob/4400eb90b29638deec117f95fecdcb79bd7684cb/src/accelerate/commands/launch.py#L465\\r\\n\\r\\nhttps://github.com/huggingface/accelerate/blob/4400eb90b29638deec117f95fecdcb79bd7684cb/src/accelerate/utils/launch.py#L51',\n", - " 'from_author': True}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/680'},\n", - " 1045415772: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex 27db19b9b..989774148 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -723,25 +723,28 @@ def _prepare_deepspeed(self, *args):\\n \\n deepspeed_plugin = self.state.deepspeed_plugin\\n \\n- result = [\\n- self._prepare_one(obj, first_pass=True) if isinstance(obj, torch.utils.data.DataLoader) else obj\\n- for obj in args\\n- ]\\n-\\n- batch_sizes = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\\n- if self.split_batches:\\n- batch_sizes = [batch_size // self.num_processes for batch_size in batch_sizes]\\n- if len(batch_sizes) == 0:\\n- raise ValueError(\\n- \"You must specify a training or evaluation dataloader in `accelerate.prepare()` when using DeepSpeed.\"\\n- )\\n+ if deepspeed_plugin.deepspeed_config[\"train_micro_batch_size_per_gpu\"] == \"auto\":\\n+ result = [\\n+ self._prepare_one(obj, first_pass=True) if isinstance(obj, torch.utils.data.DataLoader) else obj\\n+ for obj in args\\n+ ]\\n+\\n+ batch_sizes = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\\n+ if self.split_batches:\\n+ batch_sizes = [batch_size // self.num_processes for batch_size in batch_sizes]\\n+ if len(batch_sizes) == 0:\\n+ raise ValueError(\\n+ \"You must specify a training or evaluation dataloader in `accelerate.prepare()` when using DeepSpeed.\"\\n+ )\\n \\n- batch_size_per_device = min(batch_sizes) if deepspeed_plugin.is_train_batch_min else max(batch_sizes)\\n- if len(batch_sizes) > 1:\\n- logger.info(\\n- \"Since you passed both train and evaluation dataloader, `is_train_batch_min` (here \"\\n- f\"{deepspeed_plugin.is_train_batch_min} will decide the 
`train_batch_size` ({batch_size_per_device}).\"\\n- )\\n+ batch_size_per_device = min(batch_sizes) if deepspeed_plugin.is_train_batch_min else max(batch_sizes)\\n+ if len(batch_sizes) > 1:\\n+ logger.info(\\n+ \"Since you passed both train and evaluation dataloader, `is_train_batch_min` (here \"\\n+ f\"{deepspeed_plugin.is_train_batch_min} will decide the `train_batch_size` ({batch_size_per_device}).\"\\n+ )\\n+ else:\\n+ batch_size_per_device = deepspeed_plugin.deepspeed_config[\"train_micro_batch_size_per_gpu\"]\\n \\n config_kwargs = {\\n \"train_micro_batch_size_per_gpu\": batch_size_per_device,\\n@@ -916,7 +919,9 @@ def backward(self, loss, **kwargs):\\n \\n Should be used in lieu of `loss.backward()`.\\n \"\"\"\\n- loss /= self.gradient_accumulation_steps\\n+ if self.distributed_type != DistributedType.DEEPSPEED:\\n+ # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`\\n+ loss = loss / self.gradient_accumulation_steps\\n if self.distributed_type == DistributedType.DEEPSPEED:\\n self.deepspeed_engine_wrapped.backward(loss, **kwargs)\\n elif self.scaler is not None:\\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex c7f235843..30e497e84 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -549,7 +549,8 @@ def deepspeed_launcher(args):\\n current_env[\"DEEPSPEED_OFFLOAD_PARAM_DEVICE\"] = str(args.offload_param_device).lower()\\n current_env[\"DEEPSPEED_ZERO3_INIT\"] = str(args.zero3_init_flag).lower()\\n current_env[\"DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\"] = str(args.zero3_save_16bit_model).lower()\\n- current_env[\"DEEPSPEED_CONFIG_FILE\"] = str(args.deepspeed_config_file).lower()\\n+ if args.deepspeed_config_file is not None:\\n+ current_env[\"DEEPSPEED_CONFIG_FILE\"] = str(args.deepspeed_config_file)\\n \\n if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:\\n with open(\".deepspeed_env\", \"a\") as f:\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'Hmm, I tried to remove the fake dataloader workaround that was discussed at /static-proxy?url=https%3A%2F%2Fdiscuss.huggingface.co%2Ft%2Fwhen-using-deepspeed-why-do-i-need-to-pass-dataloaders-to-the-accelerator-prepare%2F22432%5C%5Cr%5C%5Cn%5C%5Cr%5C%5Cnand it\\'s super cumbersome. It appears that the only way to get to the batch size is from dataloader? why can\\'t it be derived from a `batch_size` argument? or is it by design that `batch_size` is derived from dataloader?\\r\\n\\r\\nSpecifically to this PR:\\r\\n\\r\\n1. The original idea behind auto was to try to make the ds config file as simple as possible wrt to hardcoding values and have command line args set these things once. So this doesn\\'t work:\\r\\n\\r\\n```\\r\\n \"train_micro_batch_size_per_gpu\": \"auto\",\\r\\n```\\r\\n\\r\\nTo remind, the intention of creating `auto`-values in DS config was to avoid mismatch of the same configuration value used by different sub-systems. So the way I was thinking when integrating DS in HF Trainer, is let\\'s have one place where `batch_size` is set and then all other sub-systems would inherit that value, rather than relying on the user to remember to change the same value in many places. I hope this makes sense. 
I don\\'t understand the design here so I\\'m surely missing something important.\\r\\n\\r\\nI was trying to remove the originally used workaround \\r\\nhttps://github.com/huggingface/m4/pull/610\\r\\nbut I think the workaround with the dummy dataloader is a way user-friendlier than the hardcoded `train_micro_batch_size_per_gpu` config value.\\r\\n\\r\\n2. Also while this feature that supports ` train_micro_batch_size_per_gpu` config was added, the end user has no idea it\\'s supported w/o reading the source code - the error message is still:\\r\\n\\r\\n> You must specify a training or evaluation dataloader in `accelerate.prepare()` when using DeepSpeed.\\r\\n\\r\\nShould it perhaps say:\\r\\n\\r\\n> when using DeepSpeed `accelerate.prepare()` requires you to pass at least one of training or evaluation dataloaders or alternatively set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file.\\r\\n\\r\\nor something of a kind? otherwise the feature is there but nobody knows about it.\\r\\n\\r\\nThe API doc could also say that, except it\\'s private so there is no documentation.\\r\\n\\r\\nThank you for reading.',\n", - " 'from_author': False},\n", - " {'body': 'Hello Stas,\\r\\n\\r\\n1. Inline with the intention of using `auto` values, we use the dataloader to infer the `batch_size`, one reason reason being that `accelerator.prepare` function or `accelerator` object doesn\\'t have access to command line arguments, i.e., `batch_size` argument from the main code. Another reason being that the single place where `batch_size` gets set is while the user creates dataloaders as part of conventional training, e.g., the user can have `batch_size` argument which they may modify because of certain custom logic and the modified batch_size now gets used to create dataloaders. As we have no control over training loop/code unlike Trainer, it makes sense to infer `batch_size` directly from dataloaders. \\r\\n\\r\\nWould the alternative suggestion work?\\r\\n`AcceleratorState().deepspeed_plugin.deepspeed_config[\"train_micro_batch_size_per_gpu\"]=my_batch_size` before calling `accelerator.prepare()`?\\r\\n\\r\\n2. Thank you for the pointer on the error message, I will update it.\\r\\n',\n", - " 'from_author': True},\n", - " {'body': 'Hi Sourab!\\r\\n\\r\\nIt appears that `accelerator.prepare` relies on the `deepspeed_plugin.*` settings - if those were already parsed so that any `auto`s get replaced with the proper values then it\\'d have access to the correct setting, no? or is it still too early in the game - perhaps it\\'s a matter of an order of execution?\\r\\n\\r\\nPerhaps there should be another wrapper that a user should call explicitly for deepspeed with args like bs early in the code, so that no `auto` values remain and then it\\'d be easy to rely on the actual values later on. All the values should be available early on. e.g. 
in HF Trainer we only had to wait for later for `num_training_steps`:\\r\\nhttps://github.com/huggingface/transformers/blob/0a144b8c6bd16f7b08119a233f8e7cbd33fe5bfd/src/transformers/deepspeed.py#L167\\r\\n\\r\\n> Would the alternative suggestion work?\\r\\n> AcceleratorState().deepspeed_plugin.deepspeed_config[\"train_micro_batch_size_per_gpu\"]=my_batch_size before calling accelerator.prepare()?\\r\\n\\r\\nThat would definitely work.\\r\\n\\r\\nMy first reaction is that suggestion could potentially be much more problematic should the user set the value in the ds config file and it might be an unexpected override (even though if written correctly it should be the same value). Somehow this feels like replacing one hack with another hack.\\r\\n\\r\\nI think the dummy dataset wrapped dataloader is a much cleaner way over the above, especially if the code isn\\'t necessarily always using the deepspeed backend.\\r\\n\\r\\nIf this is the best that can be done, and there is no simpler way, let\\'s just leave it as is.\\r\\n',\n", - " 'from_author': False},\n", - " {'body': '> Perhaps there should be another wrapper that a user should call explicitly for deepspeed with args like bs early in the code, so that no `auto` values remain and then it\\'d be easy to rely on the actual values later on\\r\\n\\r\\nThere is already a way to do this `deepspeed_plugin.deepspeed_config_process(**kwargs)` (Process the DeepSpeed config with the values from the kwargs.): https://github.com/huggingface/accelerate/blob/main/src/accelerate/utils/dataclasses.py#L417\\r\\nExample below:\\r\\n\\r\\n```\\r\\nif AcceleratorState().deepspeed_plugin is not None:\\r\\n kwargs = {\\r\\n \"fp16.enabled\": True,\\r\\n \"optimizer.params.lr\": 5e-5,\\r\\n \"optimizer.params.weight_decay\": 0.0,\\r\\n \"scheduler.params.warmup_min_lr\": 0.0,\\r\\n \"scheduler.params.warmup_max_lr\": 5e-5,\\r\\n \"scheduler.params.warmup_num_steps\": 0,\\r\\n \"train_micro_batch_size_per_gpu\": my_batch_size,\\r\\n \"gradient_clipping\": 1.0,\\r\\n } \\r\\n AcceleratorState().deepspeed_plugin.deepspeed_config_process(must_match=True, **kwargs)\\n\\n# call `accelerator.prepare` below `_` are just dummy placeholders \\r\\n_, _, _ = accelerator.prepare(_, _, _)\\r\\n\\r\\n...\\r\\n\\r\\n```',\n", - " 'from_author': True},\n", - " {'body': 'should `deepspeed_config_process` call that last `accelerator.prepare` command internally? This looks super-weird with `_` input and `_` output args, unless you meant something else there.',\n", - " 'from_author': False},\n", - " {'body': \"Hello @stas00 , in the example above, it is the user code, I was just mentioning/showcasing that `deepspeed_config_process` should be called before `accelerator.prepare`. As mentioned, `deepspeed_config_process` only sets `auto` values from the given kwargs dict, doesn't internally call `accelerator.prepare` at all and those `_` were placeholders 😅. \",\n", - " 'from_author': True},\n", - " {'body': \"Thank you for clarifying, @pacman100! 
It's crystal clear now.\",\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/676'},\n", - " 1045254185: {'diff': 'diff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\\nindex 9b14aeaed..a174115b1 100644\\n--- a/src/accelerate/commands/config/cluster.py\\n+++ b/src/accelerate/commands/config/cluster.py\\n@@ -59,7 +59,7 @@ def get_cluster_input():\\n lambda x: int(x),\\n )\\n same_network = _ask_field(\\n- \"Are all the machines on the same network? [YES/no]: \",\\n+ \"Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: \",\\n _convert_yes_no_to_bool,\\n default=True,\\n error_message=\"Please enter yes or no.\",\\n',\n", - " 'code_comments': [{'body': '```suggestion\\n \"Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on the different network hosts [YES/no]: \",\\n```\\n',\n", - " 'diff_hunk': '@@ -59,7 +59,7 @@ def get_cluster_input():\\n lambda x: int(x),\\n )\\n same_network = _ask_field(\\n- \"Are all the machines on the same network? [YES/no]: \",\\n+ \"Are all the machines on the same local network? [YES/no]: \",',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/674'},\n", - " 1045217690: {'diff': 'diff --git a/setup.py b/setup.py\\nindex 4bc977029..409399b17 100644\\n--- a/setup.py\\n+++ b/setup.py\\n@@ -21,9 +21,10 @@\\n extras[\"test_prod\"] = [\"pytest\", \"pytest-xdist\", \"pytest-subtests\", \"parameterized\"]\\n extras[\"test_dev\"] = [\"datasets\", \"evaluate\", \"transformers\", \"scipy\", \"sklearn\", \"deepspeed<0.7.0\", \"tqdm\"]\\n extras[\"testing\"] = extras[\"test_prod\"] + extras[\"test_dev\"]\\n+extras[\"rich\"] = [\"rich\"]\\n \\n extras[\"test_trackers\"] = [\"wandb\", \"comet-ml\", \"tensorboard\"]\\n-extras[\"dev\"] = extras[\"quality\"] + extras[\"testing\"]\\n+extras[\"dev\"] = extras[\"quality\"] + extras[\"testing\"] + extras[\"rich\"]\\n \\n extras[\"sagemaker\"] = [\\n \"sagemaker\", # boto3 is a required package in sagemaker\\n@@ -50,7 +51,7 @@\\n ]\\n },\\n python_requires=\">=3.7.0\",\\n- install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"psutil\", \"pyyaml\", \"torch>=1.4.0\", \"rich\"],\\n+ install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"psutil\", \"pyyaml\", \"torch>=1.4.0\"],\\n extras_require=extras,\\n classifiers=[\\n \"Development Status :: 5 - Production/Stable\",\\ndiff --git a/src/accelerate/__init__.py b/src/accelerate/__init__.py\\nindex 03a7198ac..10c258efa 100644\\n--- a/src/accelerate/__init__.py\\n+++ b/src/accelerate/__init__.py\\n@@ -16,7 +16,11 @@\\n InitProcessGroupKwargs,\\n find_executable_batch_size,\\n infer_auto_device_map,\\n+ is_rich_available,\\n load_checkpoint_in_model,\\n- rich,\\n synchronize_rng_states,\\n )\\n+\\n+\\n+if is_rich_available():\\n+ from .utils import rich\\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex 491ea5ad3..c7f235843 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -39,22 +39,25 @@\\n _filter_args,\\n get_launch_prefix,\\n is_deepspeed_available,\\n+ is_rich_available,\\n is_sagemaker_available,\\n is_torch_version,\\n patch_environment,\\n )\\n from accelerate.utils.constants 
import DEEPSPEED_MULTINODE_LAUNCHERS\\n from accelerate.utils.dataclasses import SageMakerDistributedType\\n-from rich import get_console\\n-from rich.logging import RichHandler\\n \\n \\n-if is_torch_version(\">=\", \"1.9.0\"):\\n- import torch.distributed.run as distrib_run\\n+if is_rich_available():\\n+ from rich import get_console\\n+ from rich.logging import RichHandler\\n+\\n+ FORMAT = \"%(message)s\"\\n+ logging.basicConfig(format=FORMAT, datefmt=\"[%X]\", handlers=[RichHandler()])\\n \\n \\n-FORMAT = \"%(message)s\"\\n-logging.basicConfig(format=FORMAT, datefmt=\"[%X]\", handlers=[RichHandler()])\\n+if is_torch_version(\">=\", \"1.9.0\"):\\n+ import torch.distributed.run as distrib_run\\n \\n logger = logging.getLogger(__name__)\\n \\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\\nindex 2b8f566f0..6ec2f4ac4 100644\\n--- a/src/accelerate/utils/__init__.py\\n+++ b/src/accelerate/utils/__init__.py\\n@@ -27,6 +27,7 @@\\n is_comet_ml_available,\\n is_datasets_available,\\n is_deepspeed_available,\\n+ is_rich_available,\\n is_sagemaker_available,\\n is_tensorboard_available,\\n is_tpu_available,\\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\\nindex 3efb61034..6015f2c14 100644\\n--- a/src/accelerate/utils/imports.py\\n+++ b/src/accelerate/utils/imports.py\\n@@ -111,6 +111,10 @@ def is_boto3_available():\\n return importlib.util.find_spec(\"boto3\") is not None\\n \\n \\n+def is_rich_available():\\n+ return importlib.util.find_spec(\"rich\") is not None\\n+\\n+\\n def is_sagemaker_available():\\n return importlib.util.find_spec(\"sagemaker\") is not None\\n \\ndiff --git a/src/accelerate/utils/rich.py b/src/accelerate/utils/rich.py\\nindex 7a54c8b5a..2d48661b7 100644\\n--- a/src/accelerate/utils/rich.py\\n+++ b/src/accelerate/utils/rich.py\\n@@ -12,7 +12,13 @@\\n # See the License for the specific language governing permissions and\\n # limitations under the License.\\n \\n-from rich.traceback import install\\n+from .imports import is_rich_available\\n \\n \\n-install(show_locals=False)\\n+if is_rich_available():\\n+ from rich.traceback import install\\n+\\n+ install(show_locals=False)\\n+\\n+else:\\n+ raise ModuleNotFoundError(\"To use the rich extension, install rich with `pip install rich`\")\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/673'},\n", - " 1045197885: {'diff': 'diff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\\nindex 65c0e56b2..9b14aeaed 100644\\n--- a/src/accelerate/commands/config/cluster.py\\n+++ b/src/accelerate/commands/config/cluster.py\\n@@ -38,6 +38,7 @@ def get_cluster_input():\\n main_process_ip = None\\n main_process_port = None\\n rdzv_backend = \"static\"\\n+ same_network = True\\n if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_CPU]:\\n num_machines = _ask_field(\\n \"How many different machines will you use (use more than 1 for multi-node training)? [1]: \",\\n@@ -57,9 +58,16 @@ def get_cluster_input():\\n \"What is the port you will use to communicate with the main process? \",\\n lambda x: int(x),\\n )\\n- rdzv_backend = _ask_field(\\n- \"What rendezvous backend will you use? (\\'static\\', \\'c10d\\', ...)\", default=\"static\"\\n+ same_network = _ask_field(\\n+ \"Are all the machines on the same network? 
[YES/no]: \",\\n+ _convert_yes_no_to_bool,\\n+ default=True,\\n+ error_message=\"Please enter yes or no.\",\\n )\\n+ if not same_network:\\n+ rdzv_backend = _ask_field(\\n+ \"What rendezvous backend will you use? (\\'static\\', \\'c10d\\', ...): \", default=\"static\"\\n+ )\\n \\n if distributed_type == DistributedType.NO:\\n use_cpu = _ask_field(\\n@@ -328,4 +336,5 @@ def get_cluster_input():\\n fsdp_config=fsdp_config,\\n use_cpu=use_cpu,\\n rdzv_backend=rdzv_backend,\\n+ same_network=same_network,\\n )\\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\\nindex e78aad181..68da64963 100644\\n--- a/src/accelerate/commands/config/config_args.py\\n+++ b/src/accelerate/commands/config/config_args.py\\n@@ -138,6 +138,7 @@ class ClusterConfig(BaseConfig):\\n main_process_ip: Optional[str] = None\\n main_process_port: Optional[int] = None\\n rdzv_backend: Optional[str] = \"static\"\\n+ same_network: Optional[bool] = False\\n main_training_function: str = \"main\"\\n \\n # args for deepspeed_plugin\\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex fcc80bb7f..491ea5ad3 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -375,15 +375,21 @@ def simple_launcher(args):\\n def multi_gpu_launcher(args):\\n num_processes = getattr(args, \"num_processes\")\\n num_machines = getattr(args, \"num_machines\")\\n+ main_process_ip = getattr(args, \"main_process_ip\")\\n+ main_process_port = getattr(args, \"main_process_port\")\\n if num_machines > 1:\\n setattr(args, \"nproc_per_node\", str(num_processes // num_machines))\\n setattr(args, \"nnodes\", str(num_machines))\\n- setattr(args, \"node_rank\", str(args.machine_rank))\\n- setattr(args, \"rdzv_endpoint\", f\"{args.main_process_ip}:{args.main_process_port}\")\\n+ setattr(args, \"node_rank\", int(args.machine_rank))\\n+ if getattr(args, \"same_network\"):\\n+ setattr(args, \"master_addr\", str(main_process_ip))\\n+ setattr(args, \"master_port\", str(main_process_port))\\n+ else:\\n+ setattr(args, \"rdzv_endpoint\", f\"{main_process_ip}:{main_process_port}\")\\n else:\\n setattr(args, \"nproc_per_node\", str(num_processes))\\n- if args.main_process_port is not None:\\n- setattr(args, \"master_port\", str(args.main_process_port))\\n+ if main_process_port is not None:\\n+ setattr(args, \"master_port\", str(main_process_port))\\n \\n if args.module and args.no_python:\\n raise ValueError(\"--module and --no_python cannot be used together\")\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/672'},\n", - " 1044967618: {'diff': 'diff --git a/docs/source/usage_guides/big_modeling.mdx b/docs/source/usage_guides/big_modeling.mdx\\nindex 9b3252809..1e13849c6 100644\\n--- a/docs/source/usage_guides/big_modeling.mdx\\n+++ b/docs/source/usage_guides/big_modeling.mdx\\n@@ -35,7 +35,13 @@ While this works very well for regularly sized models, this workflow has some cl\\n \\n \\n \\n-## Instantiating an empty model\\n+## How the Process Works: A Quick Overview\\n+\\n+\\n+\\n+## How the Process Works: Working with Code\\n+\\n+### Instantiating an empty model\\n \\n The first tool 🤗 Accelerate introduces to help with big models is a context manager [`init_empty_weights`] that helps you initialize a model without using any RAM, so that 
step 1 can be done on models of any size. Here is how it works:\\n \\n@@ -61,7 +67,7 @@ initializes an empty model with a bit more than 100B parameters. Behind the scen\\n \\n \\n \\n-## Sharded checkpoints\\n+### Sharded checkpoints\\n \\n It\\'s possible your model is so big that even a single copy won\\'t fit in RAM. That doesn\\'t mean it can\\'t be loaded: if you have one or several GPUs, this is more memory available to store your model. In this case, it\\'s better if your checkpoint is split in several smaller files that we call checkpoint shards.\\n \\n@@ -86,7 +92,7 @@ with index.json being the following file:\\n \\n and `first_state_dict.bin` containing the weights for `\"linear1.weight\"` and `\"linear1.bias\"`, `second_state_dict.bin` the ones for `\"linear2.weight\"` and `\"linear2.bias\"`\\n \\n-## Loading weights\\n+### Loading weights\\n \\n The second tool 🤗 Accelerate introduces is a function [`load_checkpoint_and_dispatch`], that will allow you to load a checkpoint inside your empty model. This supports full checkpoints (a single file containing the whole state dict) as well as sharded checkpoints. It will also automatically dispatch those weights across the devices you have available (GPUs, CPU RAM), so if you are loading a sharded checkpoint, the maximum RAM usage will be the size of the biggest shard.\\n \\n@@ -176,7 +182,7 @@ You can also design your `device_map` yourself, if you prefer to explicitly deci\\n model = load_checkpoint_and_dispatch(model, \"sharded-gpt-j-6B\", device_map=my_device_map)\\n ```\\n \\n-## Run the model\\n+### Run the model\\n \\n Now that we have done this, our model lies across several devices, and maybe the hard drive. But it can still be used as a regular PyTorch model:\\n \\n@@ -203,7 +209,7 @@ This way, you model can run for inference even if it doesn\\'t fit on one of the G\\n \\n \\n \\n-## Designing a device map\\n+### Designing a device map\\n \\n You can let 🤗 Accelerate handle the device map computation by setting `device_map` to one of the supported options (`\"auto\"`, `\"balanced\"`, `\"balanced_low_0\"`, `\"sequential\"`) or create one yourself, if you want more control over where each layer should go.\\n \\ndiff --git a/manim_animations/big_model_inference/stage_1.py b/manim_animations/big_model_inference/stage_1.py\\nnew file mode 100644\\nindex 000000000..81ec0c965\\n--- /dev/null\\n+++ b/manim_animations/big_model_inference/stage_1.py\\n@@ -0,0 +1,108 @@\\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+from manim import *\\n+\\n+\\n+class Stage1(Scene):\\n+ def construct(self):\\n+ mem = Rectangle(height=0.5,width=0.5)\\n+ fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0)\\n+\\n+ cpu_left_col_base = [mem.copy() for i in range(6)]\\n+ cpu_right_col_base = [mem.copy() for i in range(6)]\\n+ cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)\\n+ cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)\\n+ cpu_rects = VGroup(cpu_left_col,cpu_right_col).arrange(RIGHT, buff=0)\\n+ cpu_text = Text(\"CPU\", font_size=24)\\n+ cpu = Group(cpu_rects,cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\\n+ cpu.move_to([-2.5,-.5,0])\\n+ self.add(cpu)\\n+\\n+ gpu_base = [mem.copy() for i in range(1)]\\n+ gpu_rect = VGroup(*gpu_base).arrange(UP,buff=0)\\n+ gpu_text = Text(\"GPU\", font_size=24)\\n+ gpu = Group(gpu_rect,gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\\n+ gpu.align_to(cpu, DOWN)\\n+ gpu.set_x(gpu.get_x() - 1)\\n+ \\n+ self.add(gpu)\\n+\\n+ model_base = [mem.copy() for i in range(6)]\\n+ model_rect = VGroup(*model_base).arrange(RIGHT,buff=0)\\n+\\n+ model_text = Text(\"Model\", font_size=24)\\n+ model = Group(model_rect,model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\\n+ model.move_to([3, -1., 0])\\n+ \\n+ self.play(\\n+ Create(cpu_left_col, run_time=1),\\n+ Create(cpu_right_col, run_time=1),\\n+ Create(gpu_rect, run_time=1),\\n+ )\\n+\\n+ step_1 = MarkupText(\\n+ f\"First, an empty model skeleton is loaded\\\\ninto memory without using much RAM.\", \\n+ font_size=24\\n+ )\\n+\\n+ key = Square(side_length=2.2)\\n+ key.move_to([-5, 2, 0])\\n+\\n+ key_text = MarkupText(\\n+ f\"Key:\\\\n\\\\n Empty Model\",\\n+ font_size=18,\\n+ )\\n+\\n+ key_text.move_to([-5, 2.4, 0])\\n+\\n+\\n+ step_1.move_to([2, 2, 0])\\n+ self.play(\\n+ Write(step_1, run_time=2.5),\\n+ Write(key_text),\\n+ Write(key)\\n+ )\\n+\\n+ self.add(model)\\n+ \\n+\\n+ cpu_targs = []\\n+ first_animations = []\\n+ second_animations = []\\n+ for i,rect in enumerate(model_base):\\n+\\n+ cpu_target = Rectangle(height=0.46,width=0.46).set_stroke(width=0.).set_fill(YELLOW, opacity=0.7)\\n+ cpu_target.move_to(rect)\\n+ cpu_target.generate_target()\\n+ cpu_target.target.height = 0.46/4\\n+ cpu_target.target.width = 0.46/3\\n+ \\n+ if i == 0:\\n+ cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN+LEFT), buff=0.02, direction=UP)\\n+ cpu_target.target.set_x(cpu_target.target.get_x()+0.1)\\n+ elif i == 3:\\n+ cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.)\\n+ else:\\n+ cpu_target.target.next_to(cpu_targs[i-1].target, direction=RIGHT, buff=0.)\\n+ cpu_targs.append(cpu_target)\\n+\\n+ first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))\\n+ second_animations.append(MoveToTarget(cpu_target, run_time=1.5))\\n+\\n+ self.play(*first_animations)\\n+ self.play(*second_animations)\\n+ \\n+\\n+ self.wait()\\n\\\\ No newline at end of file\\ndiff --git 
a/manim_animations/big_model_inference/stage_2.py b/manim_animations/big_model_inference/stage_2.py\\nnew file mode 100644\\nindex 000000000..a30e9593b\\n--- /dev/null\\n+++ b/manim_animations/big_model_inference/stage_2.py\\n@@ -0,0 +1,126 @@\\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+from manim import *\\n+\\n+class Stage2(Scene):\\n+ def construct(self):\\n+ mem = Rectangle(height=0.5,width=0.5)\\n+ fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0)\\n+\\n+ cpu_left_col_base = [mem.copy() for i in range(6)]\\n+ cpu_right_col_base = [mem.copy() for i in range(6)]\\n+ cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)\\n+ cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)\\n+ cpu_rects = VGroup(cpu_left_col,cpu_right_col).arrange(RIGHT, buff=0)\\n+ cpu_text = Text(\"CPU\", font_size=24)\\n+ cpu = Group(cpu_rects,cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\\n+ cpu.move_to([-2.5,-.5,0])\\n+ self.add(cpu)\\n+\\n+ gpu_base = [mem.copy() for i in range(4)]\\n+ gpu_rect = VGroup(*gpu_base).arrange(UP,buff=0)\\n+ gpu_text = Text(\"GPU\", font_size=24)\\n+ gpu = Group(gpu_rect,gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\\n+ gpu.move_to([-1,-1,0])\\n+ self.add(gpu)\\n+\\n+ model_base = [mem.copy() for i in range(6)]\\n+ model_rect = VGroup(*model_base).arrange(RIGHT,buff=0)\\n+\\n+ model_text = Text(\"Model\", font_size=24)\\n+ model = Group(model_rect,model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\\n+ model.move_to([3, -1., 0])\\n+ self.add(model)\\n+ \\n+ cpu_targs = []\\n+ for i,rect in enumerate(model_base):\\n+ rect.set_stroke(YELLOW)\\n+ # target = fill.copy().set_fill(YELLOW, opacity=0.7)\\n+ # target.move_to(rect)\\n+ # self.add(target)\\n+\\n+ cpu_target = Rectangle(height=0.46/4,width=0.46/3).set_stroke(width=0.).set_fill(YELLOW, opacity=0.7)\\n+ \\n+ if i == 0:\\n+ cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN+LEFT), buff=0.02, direction=UP)\\n+ cpu_target.set_x(cpu_target.get_x()+0.1)\\n+ elif i == 3:\\n+ cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.)\\n+ else:\\n+ cpu_target.next_to(cpu_targs[i-1], direction=RIGHT, buff=0.)\\n+ self.add(cpu_target)\\n+ cpu_targs.append(cpu_target)\\n+\\n+ \\n+\\n+ checkpoint_base = [mem.copy() for i in range(6)]\\n+ checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT,buff=0)\\n+\\n+ checkpoint_text = Text(\"Loaded Checkpoint\", font_size=24)\\n+ checkpoint = Group(checkpoint_rect,checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)\\n+ checkpoint.move_to([3, .5, 0])\\n+ \\n+ key = Square(side_length=2.2)\\n+ key.move_to([-5, 2, 0])\\n+\\n+ key_text = MarkupText(\\n+ f\"Key:\\\\n\\\\n Empty Model\",\\n+ font_size=18,\\n+ )\\n+\\n+ key_text.move_to([-5, 2.4, 0])\\n+\\n+ self.add(key_text, key)\\n+\\n+ blue_text = MarkupText(\\n+ f\" Checkpoint\",\\n+ font_size=18,\\n+ )\\n+\\n+ blue_text.next_to(key_text, DOWN*2.4, aligned_edge=key_text.get_left())\\n+\\n+ step_2 = 
MarkupText(\\n+ f\\'Next, a second model is loaded into memory,\\\\nwith the weights of a single shard.\\', \\n+ font_size=24\\n+ )\\n+ step_2.move_to([2, 2, 0])\\n+ self.play(\\n+ Write(step_2),\\n+ Write(blue_text)\\n+ )\\n+\\n+ self.play(\\n+ Write(checkpoint_text, run_time=1),\\n+ Create(checkpoint_rect, run_time=1)\\n+ )\\n+\\n+ first_animations = []\\n+ second_animations = []\\n+ for i,rect in enumerate(checkpoint_base):\\n+ target = fill.copy().set_fill(BLUE, opacity=0.7)\\n+ target.move_to(rect)\\n+ first_animations.append(GrowFromCenter(target, run_time=1))\\n+\\n+ cpu_target = target.copy()\\n+ cpu_target.generate_target()\\n+ if i < 5:\\n+ cpu_target.target.move_to(cpu_left_col_base[i+1])\\n+ else:\\n+ cpu_target.target.move_to(cpu_right_col_base[i-5])\\n+ second_animations.append(MoveToTarget(cpu_target, run_time=1.5))\\n+ \\n+ self.play(*first_animations)\\n+ self.play(*second_animations)\\n+ self.wait()\\n\\\\ No newline at end of file\\ndiff --git a/manim_animations/big_model_inference/stage_3.py b/manim_animations/big_model_inference/stage_3.py\\nnew file mode 100644\\nindex 000000000..4ba20c4b5\\n--- /dev/null\\n+++ b/manim_animations/big_model_inference/stage_3.py\\n@@ -0,0 +1,158 @@\\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+from manim import *\\n+\\n+class Stage3(Scene):\\n+ def construct(self):\\n+ mem = Rectangle(height=0.5,width=0.5)\\n+ meta_mem = Rectangle(height=0.25,width=0.25)\\n+ fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0)\\n+\\n+ cpu_left_col_base = [mem.copy() for i in range(6)]\\n+ cpu_right_col_base = [mem.copy() for i in range(6)]\\n+ cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)\\n+ cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)\\n+ cpu_rects = VGroup(cpu_left_col,cpu_right_col).arrange(RIGHT, buff=0)\\n+ cpu_text = Text(\"CPU\", font_size=24)\\n+ cpu = Group(cpu_rects,cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\\n+ cpu.move_to([-2.5,-.5,0])\\n+ self.add(cpu)\\n+\\n+ gpu_base = [mem.copy() for i in range(4)]\\n+ gpu_rect = VGroup(*gpu_base).arrange(UP,buff=0)\\n+ gpu_text = Text(\"GPU\", font_size=24)\\n+ gpu = Group(gpu_rect,gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\\n+ gpu.move_to([-1,-1,0])\\n+ self.add(gpu)\\n+\\n+ model_base = [mem.copy() for i in range(6)]\\n+ model_rect = VGroup(*model_base).arrange(RIGHT,buff=0)\\n+\\n+ model_text = Text(\"Model\", font_size=24)\\n+ model = Group(model_rect,model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\\n+ model.move_to([3, -1., 0])\\n+ self.add(model)\\n+\\n+ model_arr = []\\n+ model_cpu_arr = []\\n+ model_meta_arr = []\\n+ \\n+ for i,rect in enumerate(model_base):\\n+ rect.set_stroke(YELLOW)\\n+\\n+ cpu_target = Rectangle(height=0.46/4,width=0.46/3).set_stroke(width=0.).set_fill(YELLOW, opacity=0.7)\\n+ \\n+ if i == 0:\\n+ cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN+LEFT), buff=0.02, direction=UP)\\n+ 
cpu_target.set_x(cpu_target.get_x()+0.1)\\n+ elif i == 3:\\n+ cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.)\\n+ else:\\n+ cpu_target.next_to(model_cpu_arr[i-1], direction=RIGHT, buff=0.)\\n+ self.add(cpu_target)\\n+ model_cpu_arr.append(cpu_target)\\n+\\n+ self.add(*model_arr, *model_cpu_arr, *model_meta_arr)\\n+\\n+ checkpoint_base = [mem.copy() for i in range(6)]\\n+ checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT,buff=0)\\n+\\n+ checkpoint_text = Text(\"Loaded Checkpoint\", font_size=24)\\n+ checkpoint = Group(checkpoint_rect,checkpoint_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\\n+ checkpoint.move_to([3, .5, 0])\\n+ \\n+ self.add(checkpoint)\\n+\\n+ ckpt_arr = []\\n+ ckpt_cpu_arr = []\\n+\\n+ for i,rect in enumerate(checkpoint_base):\\n+ target = fill.copy().set_fill(BLUE, opacity=0.7)\\n+ target.move_to(rect)\\n+ ckpt_arr.append(target)\\n+\\n+ cpu_target = target.copy()\\n+ if i < 5:\\n+ cpu_target.move_to(cpu_left_col_base[i+1])\\n+ else:\\n+ cpu_target.move_to(cpu_right_col_base[i-5])\\n+ ckpt_cpu_arr.append(cpu_target)\\n+ self.add(*ckpt_arr, *ckpt_cpu_arr)\\n+\\n+ key = Square(side_length=2.2)\\n+ key.move_to([-5, 2, 0])\\n+\\n+ key_text = MarkupText(\\n+ f\"Key:\\\\n\\\\n Empty Model\",\\n+ font_size=18,\\n+ )\\n+\\n+ key_text.move_to([-5, 2.4, 0])\\n+\\n+ self.add(key_text, key)\\n+\\n+ blue_text = MarkupText(\\n+ f\" Checkpoint\",\\n+ font_size=18,\\n+ )\\n+\\n+ blue_text.next_to(key_text, DOWN*2.4, aligned_edge=key_text.get_left())\\n+ self.add(blue_text)\\n+\\n+ step_3 = MarkupText(\\n+ f\\'Based on the passed in configuration, weights are stored in\\\\na variety of np.memmaps on disk or to a particular device.\\', \\n+ font_size=24\\n+ )\\n+ step_3.move_to([2, 2, 0])\\n+\\n+ disk_left_col_base = [meta_mem.copy() for i in range(6)]\\n+ disk_right_col_base = [meta_mem.copy() for i in range(6)]\\n+ disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)\\n+ disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)\\n+ disk_rects = VGroup(disk_left_col,disk_right_col).arrange(RIGHT, buff=0)\\n+ disk_text = Text(\"Disk\", font_size=24)\\n+ disk = Group(disk_rects,disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\\n+ disk.move_to([-4.,-1.25,0])\\n+ self.play(\\n+ Write(step_3, run_time=3),\\n+ Write(disk_text, run_time=1),\\n+ Create(disk_rects, run_time=1)\\n+ )\\n+\\n+ animations = []\\n+ for i,rect in enumerate(ckpt_cpu_arr):\\n+ target = rect.copy()\\n+ target.generate_target()\\n+ target.target.move_to(disk_left_col_base[i]).scale(0.5)\\n+ animations.append(MoveToTarget(target, run_time=1.5))\\n+ self.play(*animations)\\n+\\n+ self.play(FadeOut(step_3))\\n+\\n+ step_4 = MarkupText(\\n+ f\\'Then, the checkpoint is removed from memory\\\\nthrough garbage collection.\\', \\n+ font_size=24\\n+ )\\n+ step_4.move_to([2, 2, 0])\\n+\\n+ self.play(\\n+ Write(step_4, run_time=3)\\n+ )\\n+\\n+ self.play(\\n+ FadeOut(checkpoint_rect, checkpoint_text, *ckpt_arr, *ckpt_cpu_arr),\\n+ )\\n+\\n+ self.wait() \\n\\\\ No newline at end of file\\ndiff --git a/manim_animations/big_model_inference/stage_4.py b/manim_animations/big_model_inference/stage_4.py\\nnew file mode 100644\\nindex 000000000..3a79ad97c\\n--- /dev/null\\n+++ b/manim_animations/big_model_inference/stage_4.py\\n@@ -0,0 +1,156 @@\\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+from manim import *\\n+\\n+class Stage4(Scene):\\n+ def construct(self):\\n+ mem = Rectangle(height=0.5,width=0.5)\\n+ fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0)\\n+ meta_mem = Rectangle(height=0.25,width=0.25)\\n+\\n+ cpu_left_col_base = [mem.copy() for i in range(6)]\\n+ cpu_right_col_base = [mem.copy() for i in range(6)]\\n+ cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)\\n+ cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)\\n+ cpu_rects = VGroup(cpu_left_col,cpu_right_col).arrange(RIGHT, buff=0)\\n+ cpu_text = Text(\"CPU\", font_size=24)\\n+ cpu = Group(cpu_rects,cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\\n+ cpu.move_to([-2.5,-.5,0])\\n+ self.add(cpu)\\n+\\n+ gpu_base = [mem.copy() for i in range(4)]\\n+ gpu_rect = VGroup(*gpu_base).arrange(UP,buff=0)\\n+ gpu_text = Text(\"GPU\", font_size=24)\\n+ gpu = Group(gpu_rect,gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\\n+ gpu.move_to([-1,-1,0])\\n+ self.add(gpu)\\n+\\n+ model_base = [mem.copy() for i in range(6)]\\n+ model_rect = VGroup(*model_base).arrange(RIGHT,buff=0)\\n+\\n+ model_text = Text(\"Model\", font_size=24)\\n+ model = Group(model_rect,model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\\n+ model.move_to([3, -1., 0])\\n+ self.add(model)\\n+\\n+ model_cpu_arr = []\\n+ model_meta_arr = []\\n+ \\n+ for i,rect in enumerate(model_base):\\n+ rect.set_stroke(YELLOW)\\n+\\n+ cpu_target = Rectangle(height=0.46/4,width=0.46/3).set_stroke(width=0.).set_fill(YELLOW, opacity=0.7)\\n+ \\n+ if i == 0:\\n+ cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN+LEFT), buff=0.02, direction=UP)\\n+ cpu_target.set_x(cpu_target.get_x()+0.1)\\n+ elif i == 3:\\n+ cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.)\\n+ else:\\n+ cpu_target.next_to(model_cpu_arr[i-1], direction=RIGHT, buff=0.)\\n+ self.add(cpu_target)\\n+ model_cpu_arr.append(cpu_target)\\n+\\n+ self.add(*model_cpu_arr, *model_meta_arr)\\n+\\n+ disk_left_col_base = [meta_mem.copy() for i in range(6)]\\n+ disk_right_col_base = [meta_mem.copy() for i in range(6)]\\n+ disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)\\n+ disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)\\n+ disk_rects = VGroup(disk_left_col,disk_right_col).arrange(RIGHT, buff=0)\\n+ disk_text = Text(\"Disk\", font_size=24)\\n+ disk = Group(disk_rects,disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\\n+ disk.move_to([-4.,-1.25,0])\\n+ self.add(disk_text, disk_rects)\\n+\\n+ cpu_disk_arr = []\\n+\\n+ for i in range(6):\\n+ target = fill.copy().set_fill(BLUE, opacity=0.8)\\n+ target.move_to(disk_left_col_base[i]).scale(0.5)\\n+ cpu_disk_arr.append(target)\\n+\\n+ self.add(*cpu_disk_arr)\\n+\\n+ key = Square(side_length=2.2)\\n+ key.move_to([-5, 2, 0])\\n+\\n+ key_text = MarkupText(\\n+ f\"Key:\\\\n\\\\n Empty Model\",\\n+ font_size=18,\\n+ )\\n+\\n+ key_text.move_to([-5, 2.4, 0])\\n+\\n+ self.add(key_text, 
key)\\n+\\n+ blue_text = MarkupText(\\n+ f\" Checkpoint\",\\n+ font_size=18,\\n+ )\\n+\\n+ blue_text.next_to(key_text, DOWN*2.4, aligned_edge=key_text.get_left())\\n+ self.add(blue_text)\\n+\\n+ step_5 = MarkupText(\\n+ f\\'The offloaded weights are all sent to the CPU.\\', \\n+ font_size=24\\n+ )\\n+ step_5.move_to([2, 2, 0])\\n+\\n+ self.play(Write(step_5, run_time=3))\\n+\\n+ for i in range(6):\\n+ rect = cpu_disk_arr[i]\\n+ cp2 = rect.copy().set_fill(BLUE, opacity=0.8).scale(2.0)\\n+ cp2.generate_target()\\n+ cp2.target.move_to(model_base[i])\\n+\\n+ if i == 0:\\n+ rect.set_fill(BLUE, opacity=0.8)\\n+ rect.generate_target()\\n+ rect.target.move_to(cpu_left_col_base[0]).scale(2.0)\\n+ \\n+ self.remove(*model_meta_arr, \\n+ *model_cpu_arr,\\n+ )\\n+\\n+ else:\\n+ rect.generate_target()\\n+ rect.target.move_to(cpu_left_col_base[i]).scale(2.0)\\n+ self.play(\\n+ MoveToTarget(rect),\\n+ MoveToTarget(cp2),\\n+ model_base[i].animate.set_stroke(WHITE)\\n+ )\\n+ self.play(FadeOut(step_5))\\n+\\n+ step_5 = MarkupText(\\n+ f\\'Finally, hooks are added to each weight in the model\\\\nto transfer the weights from CPU to GPU\\\\n\\\\t\\\\tand back when needed.\\', \\n+ font_size=24\\n+ )\\n+ step_5.move_to([2, 2, 0])\\n+\\n+ self.play(Write(step_5, run_time=3))\\n+\\n+ arrows = []\\n+ animations = []\\n+ for i in range(6):\\n+ a = Arrow(start=UP, end=DOWN, color=RED, buff=.5)\\n+ a.next_to(model_base[i].get_left(), UP, buff=0.2)\\n+ arrows.append(a)\\n+ animations.append(Write(a))\\n+ self.play(*animations)\\n+ self.wait() \\n\\\\ No newline at end of file\\ndiff --git a/manim_animations/big_model_inference/stage_5.py b/manim_animations/big_model_inference/stage_5.py\\nnew file mode 100644\\nindex 000000000..8b2ff3357\\n--- /dev/null\\n+++ b/manim_animations/big_model_inference/stage_5.py\\n@@ -0,0 +1,221 @@\\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+from manim import *\\n+\\n+class Stage5(Scene):\\n+ def construct(self):\\n+ mem = Rectangle(height=0.5,width=0.5)\\n+ fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0)\\n+\\n+ meta_mem = Rectangle(height=0.25,width=0.25)\\n+\\n+ cpu_left_col_base = [mem.copy() for i in range(6)]\\n+ cpu_right_col_base = [mem.copy() for i in range(6)]\\n+ cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)\\n+ cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)\\n+ cpu_rects = VGroup(cpu_left_col,cpu_right_col).arrange(RIGHT, buff=0)\\n+ cpu_text = Text(\"CPU\", font_size=24)\\n+ cpu = Group(cpu_rects,cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\\n+ cpu.move_to([-2.5,-.5,0])\\n+ self.add(cpu)\\n+\\n+ gpu_base = [mem.copy() for i in range(4)]\\n+ gpu_rect = VGroup(*gpu_base).arrange(UP,buff=0)\\n+ gpu_text = Text(\"GPU\", font_size=24)\\n+ gpu = Group(gpu_rect,gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\\n+ gpu.move_to([-1,-1,0])\\n+ self.add(gpu)\\n+\\n+ model_base = [mem.copy() for i in range(6)]\\n+ model_rect = VGroup(*model_base).arrange(RIGHT,buff=0)\\n+\\n+ model_text = Text(\"Model\", font_size=24)\\n+ model = Group(model_rect,model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\\n+ model.move_to([3, -1., 0])\\n+ self.add(model)\\n+\\n+ model_arr = []\\n+ model_cpu_arr = []\\n+ \\n+ for i,rect in enumerate(model_base):\\n+ target = fill.copy().set_fill(BLUE, opacity=0.8)\\n+ target.move_to(rect)\\n+ model_arr.append(target)\\n+\\n+ cpu_target = Rectangle(height=0.46,width=0.46).set_stroke(width=0.).set_fill(BLUE, opacity=0.8)\\n+ cpu_target.move_to(cpu_left_col_base[i])\\n+ model_cpu_arr.append(cpu_target)\\n+\\n+ self.add(*model_arr, *model_cpu_arr)\\n+\\n+ disk_left_col_base = [meta_mem.copy() for i in range(6)]\\n+ disk_right_col_base = [meta_mem.copy() for i in range(6)]\\n+ disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)\\n+ disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)\\n+ disk_rects = VGroup(disk_left_col,disk_right_col).arrange(RIGHT, buff=0)\\n+ disk_text = Text(\"Disk\", font_size=24)\\n+ disk = Group(disk_rects,disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\\n+ disk.move_to([-4,-1.25,0])\\n+ self.add(disk_text, disk_rects)\\n+\\n+ key = Square(side_length=2.2)\\n+ key.move_to([-5, 2, 0])\\n+\\n+ key_text = MarkupText(\\n+ f\"Key:\\\\n\\\\n Empty Model\",\\n+ font_size=18,\\n+ )\\n+\\n+ key_text.move_to([-5, 2.4, 0])\\n+\\n+ self.add(key_text, key)\\n+\\n+ blue_text = MarkupText(\\n+ f\" Checkpoint\",\\n+ font_size=18,\\n+ )\\n+\\n+ blue_text.next_to(key_text, DOWN*2.4, aligned_edge=key_text.get_left())\\n+ self.add(blue_text)\\n+\\n+ step_6 = MarkupText(\\n+ f\\'Now watch as an input is passed through the model\\\\nand how the memory is utilized and handled.\\', \\n+ font_size=24\\n+ )\\n+ step_6.move_to([2, 2, 0])\\n+\\n+ self.play(Write(step_6))\\n+\\n+ input = Square(0.3)\\n+ 
input.set_fill(RED, opacity=1.)\\n+ input.set_stroke(width=0.)\\n+ input.next_to(model_base[0], LEFT, buff=.5)\\n+\\n+ self.play(Write(input))\\n+\\n+ input.generate_target()\\n+ input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)\\n+ self.play(MoveToTarget(input))\\n+\\n+ self.play(FadeOut(step_6))\\n+\\n+\\n+ a = Arrow(start=UP, end=DOWN, color=RED, buff=.5)\\n+ a.next_to(model_arr[0].get_left(), UP, buff=0.2)\\n+\\n+ model_cpu_arr[0].generate_target()\\n+ model_cpu_arr[0].target.move_to(gpu_rect[0])\\n+\\n+ step_7 = MarkupText(\\n+ f\\'As the input reaches a layer, the hook triggers\\\\nand weights are moved from the CPU\\\\nto the GPU and back.\\', \\n+ font_size=24\\n+ )\\n+ step_7.move_to([2, 2, 0])\\n+\\n+ self.play(Write(step_7, run_time=3))\\n+\\n+ circ_kwargs = {\"run_time\":1, \"fade_in\":True, \"fade_out\":True, \"buff\":0.02}\\n+\\n+ self.play(\\n+ Write(a), \\n+ Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),\\n+ Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),\\n+ Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),\\n+ )\\n+ self.play(\\n+ MoveToTarget(model_cpu_arr[0])\\n+ )\\n+\\n+ a_c = a.copy()\\n+ for i in range(6):\\n+ a_c.next_to(model_arr[i].get_right()+0.02, UP, buff=0.2)\\n+\\n+ input.generate_target()\\n+ input.target.move_to(model_arr[i].get_right()+0.02)\\n+\\n+ grp = AnimationGroup(\\n+ FadeOut(a, run_time=.5), \\n+ MoveToTarget(input, run_time=.5), \\n+ FadeIn(a_c, run_time=.5),\\n+ lag_ratio=0.2\\n+ )\\n+\\n+ self.play(grp)\\n+\\n+\\n+ model_cpu_arr[i].generate_target()\\n+ model_cpu_arr[i].target.move_to(cpu_left_col_base[i])\\n+\\n+\\n+ if i < 5:\\n+ model_cpu_arr[i+1].generate_target()\\n+ model_cpu_arr[i+1].target.move_to(gpu_rect[0])\\n+ if i >= 1:\\n+ circ_kwargs[\"run_time\"] = .7\\n+\\n+ self.play(\\n+ Circumscribe(model_arr[i], **circ_kwargs),\\n+ Circumscribe(cpu_left_col_base[i], **circ_kwargs),\\n+ Circumscribe(cpu_left_col_base[i+1], color=ORANGE, **circ_kwargs), \\n+ Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),\\n+ Circumscribe(model_arr[i+1], color=ORANGE, **circ_kwargs),\\n+ )\\n+ if i < 1:\\n+ self.play(\\n+ MoveToTarget(model_cpu_arr[i]), \\n+ MoveToTarget(model_cpu_arr[i+1]),\\n+ )\\n+ else:\\n+ self.play(\\n+ MoveToTarget(model_cpu_arr[i], run_time=.7), \\n+ MoveToTarget(model_cpu_arr[i+1], run_time=.7),\\n+ )\\n+ else:\\n+ model_cpu_arr[i].generate_target()\\n+ model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])\\n+ input.generate_target()\\n+ input.target.next_to(model_arr[-1].get_right(), RIGHT+0.02, buff=0.2)\\n+\\n+ self.play(\\n+ Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),\\n+ Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),\\n+ Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),\\n+ )\\n+\\n+ self.play(\\n+ MoveToTarget(model_cpu_arr[i])\\n+ )\\n+\\n+ a = a_c\\n+ a_c = a_c.copy()\\n+\\n+ input.generate_target()\\n+ input.target.next_to(model_base[-1], RIGHT+0.02, buff=.5)\\n+ self.play(\\n+ FadeOut(step_7),\\n+ FadeOut(a, run_time=.5), \\n+ )\\n+\\n+ step_8 = MarkupText(\\n+ f\\'Inference on a model too large for GPU memory\\\\nis successfully completed.\\', font_size=24\\n+ )\\n+ step_8.move_to([2, 2, 0])\\n+\\n+ self.play(\\n+ Write(step_8, run_time=3),\\n+ MoveToTarget(input)\\n+ )\\n+\\n+ self.wait()\\n\\\\ No newline at end of file\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 
'https://api.github.com/repos/huggingface/accelerate/pulls/671'},\n", - " 1043736990: {'diff': 'diff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\\nindex 074d02e4a..3efb61034 100644\\n--- a/src/accelerate/utils/imports.py\\n+++ b/src/accelerate/utils/imports.py\\n@@ -14,6 +14,7 @@\\n \\n import importlib\\n import sys\\n+from functools import lru_cache\\n \\n import torch\\n \\n@@ -50,6 +51,7 @@ def is_apex_available():\\n return importlib.util.find_spec(\"apex\") is not None\\n \\n \\n+@lru_cache()\\n def is_tpu_available(check_device=True):\\n \"Checks if `torch_xla` is installed and potentially if a TPU is in the environment\"\\n if _tpu_available and check_device:\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/670'},\n", - " 1040964232: {'diff': 'diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml\\nindex 8160cb4ee..6b9825523 100644\\n--- a/docs/source/_toctree.yml\\n+++ b/docs/source/_toctree.yml\\n@@ -35,6 +35,8 @@\\n title: Using 🤗 Accelerate on SageMaker\\n - local: usage_guides/mps\\n title: How to use Apple Silicon M1 GPUs\\n+ - local: usage_guides/megatron_lm\\n+ title: How to use Megatron-LM\\n - local: usage_guides/training_zoo\\n title: 🤗 Accelerate Example Zoo\\n title: How-To Guides\\n@@ -71,4 +73,6 @@\\n title: Kwargs handlers\\n - local: package_reference/utilities\\n title: Utility functions and classes\\n+ - local: package_reference/megatron_lm\\n+ title: Megatron-LM Utilities\\n title: \"Reference\"\\n\\\\ No newline at end of file\\ndiff --git a/docs/source/package_reference/megatron_lm.mdx b/docs/source/package_reference/megatron_lm.mdx\\nnew file mode 100644\\nindex 000000000..b59768bec\\n--- /dev/null\\n+++ b/docs/source/package_reference/megatron_lm.mdx\\n@@ -0,0 +1,29 @@\\n+\\n+\\n+# Utilities for Megatron-LM\\n+\\n+[[autodoc]] utils.MegatronLMPlugin\\n+\\n+[[autodoc]] utils.MegatronLMDummyScheduler\\n+\\n+[[autodoc]] utils.MegatronLMDummyDataLoader\\n+\\n+[[autodoc]] utils.AbstractTrainStep\\n+\\n+[[autodoc]] utils.GPTTrainStep\\n+\\n+[[autodoc]] utils.BertTrainStep\\n+\\n+[[autodoc]] utils.T5TrainStep\\n+\\n+[[autodoc]] utils.avg_losses_across_data_parallel_group\\ndiff --git a/docs/source/usage_guides/megatron_lm.mdx b/docs/source/usage_guides/megatron_lm.mdx\\nnew file mode 100644\\nindex 000000000..188dddb32\\n--- /dev/null\\n+++ b/docs/source/usage_guides/megatron_lm.mdx\\n@@ -0,0 +1,507 @@\\n+\\n+\\n+\\n+# Megatron-LM\\n+\\n+[Megatron-LM](https://github.com/NVIDIA/Megatron-LM) enables training large transformer language models at scale.\\n+It provides efficient tensor, pipeline and sequence based model parallelism for pre-training transformer based\\n+Language Models such as [GPT](https://arxiv.org/abs/2005.14165) (Decoder Only), [BERT](https://arxiv.org/pdf/1810.04805.pdf) (Encoder Only) and [T5](https://arxiv.org/abs/1910.10683) (Encoder-Decoder).\\n+For detailed information and how things work behind the scene please refer the github [repo](https://github.com/NVIDIA/Megatron-LM).\\n+\\n+## What is integrated?\\n+\\n+Accelerate integrates following feature of Megatron-LM to enable large scale pre-training/finetuning\\n+of BERT (Encoder), GPT (Decoder) or T5 models (Encoder and Decoder):\\n+\\n+a. 
**Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks\\n+\\n+b. **Pipeline Parallelism (PP)**: Reduces memory footprint and enables large scale training via inter-node parallelization. \\n+Reduces the bubble of naive PP via PipeDream-Flush schedule/1F1B schedule and Interleaved 1F1B schedule. \\n+Layers are distributed uniformly across PP stages.\\n+\\n+c. **Sequence Parallelism (SP)**: Reduces memory footprint without any additional communication. Only applicable when using TP.\\n+\\n+d. **Data Parallelism (DP)** via Distributed Optimizer: Reduces memory footprint by sharding optimizer states and gradients across DP ranks.\\n+\\n+e. **Selective Activation Recomputation**: Reduces the memory footprint of activations significantly via smart activation checkpointing.\\n+It doesn\\'t store activations occupying large memory while being fast to recompute thereby achieving great tradeoff between memory and recomputation.\\n+\\n+f. **Fused Kernels**: Fused Softmax, Mixed Precision Fused Layer Norm and Fused gradient accumulation to weight gradient computation of linear layer.\\n+PyTorch JIT compiled Fused GeLU and Fused Bias+Dropout+Residual addition.\\n+\\n+g. **Support for Indexed datasets**: Efficient binary format of datasets for large scale training. Support for the `mmap`, `cached` index file and the `lazy` loader format.\\n+\\n+h. **Checkpoint reshaping and interoperability**: Utility for reshaping Megatron-LM checkpoints of variable \\n+tensor and pipeline parallel sizes to the beloved 🤗 Transformers sharded checkpoints as it has great support with plethora of tools\\n+such as 🤗 Accelerate Big Model Inference, Megatron-DeepSpeed Inference etc. \\n+Support is also available for converting 🤗 Transformers sharded checkpoints to Megatron-LM checkpoint of variable tensor and pipeline parallel sizes\\n+for large scale training. \\n+\\n+\\n+## Pre-Requisites \\n+\\n+You will need to install the latest pytorch, cuda, nccl, and NVIDIA [APEX](https://github.com/NVIDIA/apex#quick-start) releases and the nltk library.\\n+See [documentation](https://github.com/NVIDIA/Megatron-LM#setup) for more details. \\n+Another way to setup the environment is to pull an NVIDIA PyTorch Container that comes with all the required installations from NGC.\\n+\\n+Below is a step-by-step method to set up the conda environment:\\n+\\n+1. Create a virtual environment\\n+```\\n+conda create --name ml\\n+```\\n+\\n+2. Assuming that the machine has CUDA 11.3 installed, installing the corresponding PyTorch GPU Version\\n+```\\n+conda install pytorch torchvision torchaudio cudatoolkit=11.3 -c pytorch\\n+```\\n+\\n+3. Install Nvidia APEX\\n+```\\n+git clone https://github.com/NVIDIA/apex\\n+cd apex\\n+pip install -v --disable-pip-version-check --no-cache-dir --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext\" ./\\n+cd ..\\n+```\\n+\\n+4. Installing Megatron-LM\\n+\\n+ a. Cloning the Megatron-LM repo\\n+ ```\\n+ git clone https://github.com/NVIDIA/Megatron-LM.git\\n+ cd Megatron-LM\\n+ ```\\n+\\n+ b. 
Create a file `setup.py`, paste the below code and put in the root folder\\n+ ```python\\n+ \"\"\"Setup for pip package.\"\"\"\\n+\\n+ import os\\n+ import sys\\n+ import setuptools\\n+\\n+ if sys.version_info < (3,):\\n+ raise Exception(\"Python 2 is not supported by Megatron.\")\\n+\\n+ with open(\"README.md\", \"r\") as fh:\\n+ long_description = fh.read()\\n+\\n+ setuptools.setup(\\n+ name=\"megatron-lm\",\\n+ version=\"3.0.0\",\\n+ description=\"Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism.\",\\n+ long_description=long_description,\\n+ long_description_content_type=\"text/markdown\",\\n+ # The project\\'s main homepage.\\n+ url=\"https://github.com/NVIDIA/Megatron-LM\",\\n+ author=\"NVIDIA INC\",\\n+ maintainer=\"NVIDIA INC\",\\n+ # The licence under which the project is released\\n+ license=\"See https://github.com/NVIDIA/Megatron-LM/blob/master/LICENSE\",\\n+ classifiers=[\\n+ \"Intended Audience :: Developers\",\\n+ \"Intended Audience :: Science/Research\",\\n+ \"Intended Audience :: Information Technology\",\\n+ # Indicate what your project relates to\\n+ \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\\n+ \"Topic :: Software Development :: Libraries :: Python Modules\",\\n+ # Additional Setting\\n+ \"Environment :: Console\",\\n+ \"Natural Language :: English\",\\n+ \"Operating System :: OS Independent\",\\n+ ],\\n+ python_requires=\">=3.6\",\\n+ packages=setuptools.find_packages(),\\n+ install_requires=[\"nltk\", \"six\", \"regex\", \"torch>=1.12.0\", \"pybind11\"],\\n+ # Add in any packaged data.\\n+ include_package_data=True,\\n+ zip_safe=False,\\n+ # PyPI package information.\\n+ keywords=\"deep learning, Megatron, gpu, NLP, nvidia, pytorch, torch, language\",\\n+ )\\n+ ```\\n+\\n+ c. installing via below command\\n+ ```\\n+ pip install \".\"\\n+ ```\\n+\\n+## Accelerate Megatron-LM Plugin\\n+\\n+Important features are directly supported via the `accelerate config` command. \\n+An example of thr corresponding questions for using Megatron-LM features is shown below:\\n+\\n+```bash\\n+:~$ accelerate config --config_file \"megatron_gpt_config.yaml\"\\n+In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): 0\\n+Which type of machine are you using? ([0] No distributed training, [1] multi-CPU, [2] multi-GPU, [3] TPU [4] MPS): 2\\n+How many different machines will you use (use more than 1 for multi-node training)? [1]: \\n+Do you want to use DeepSpeed? [yes/NO]: \\n+Do you want to use FullyShardedDataParallel? [yes/NO]: \\n+Do you want to use Megatron-LM ? [yes/NO]: yes\\n+What is the Tensor Parallelism degree/size? [1]:2\\n+Do you want to enable Sequence Parallelism? [YES/no]: \\n+What is the Pipeline Parallelism degree/size? [1]:2\\n+What is the number of micro-batches? [1]:2\\n+Do you want to enable selective activation recomputation? [YES/no]: \\n+Do you want to use distributed optimizer which shards optimizer state and gradients across data pralellel ranks? [YES/no]: \\n+What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]: \\n+How many GPU(s) should be used for distributed training? [1]:8\\n+Do you wish to use FP16 or BF16 (mixed precision)? 
[NO/fp16/bf16]: bf16\\n+```\\n+\\n+The resulting config is shown below:\\n+\\n+```\\n+~$ cat megatron_gpt_config.yaml \\n+compute_environment: LOCAL_MACHINE\\n+deepspeed_config: {}\\n+distributed_type: MEGATRON_LM\\n+downcast_bf16: \\'no\\'\\n+fsdp_config: {}\\n+machine_rank: 0\\n+main_process_ip: null\\n+main_process_port: null\\n+main_training_function: main\\n+megatron_lm_config:\\n+ megatron_lm_gradient_clipping: 1.0\\n+ megatron_lm_num_micro_batches: 2\\n+ megatron_lm_pp_degree: 2\\n+ megatron_lm_recompute_activations: true\\n+ megatron_lm_sequence_parallelism: true\\n+ megatron_lm_tp_degree: 2\\n+ megatron_lm_use_distributed_optimizer: true\\n+mixed_precision: bf16\\n+num_machines: 1\\n+num_processes: 8\\n+rdzv_backend: static\\n+same_network: true\\n+use_cpu: false\\n+```\\n+\\n+We will take the example of GPT pre-training. The minimal changes required to the official `run_clm_no_trainer.py` \\n+to use Megatron-LM are as follows:\\n+\\n+1. As Megatron-LM uses its own implementation of Optimizer, the corresponding scheduler compatible with it needs to be used.\\n+As such, support for only the Megatron-LM\\'s scheduler is present. User will need to create `accelerate.utils.MegatronLMDummyScheduler`.\\n+Example is given below:\\n+\\n+```python\\n+from accelerate.utils import MegatronLMDummyScheduler\\n+\\n+if accelerator.distributed_type == DistributedType.MEGATRON_LM:\\n+ lr_scheduler = MegatronLMDummyScheduler(\\n+ optimizer=optimizer,\\n+ total_num_steps=args.max_train_steps,\\n+ warmup_num_steps=args.num_warmup_steps,\\n+ )\\n+else:\\n+ lr_scheduler = get_scheduler(\\n+ name=args.lr_scheduler_type,\\n+ optimizer=optimizer,\\n+ num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps,\\n+ num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,\\n+ )\\n+```\\n+\\n+2. Getting the details of the total batch size now needs to be cognization of tensor and pipeline parallel sizes.\\n+Example of getting the effective total batch size is shown below:\\n+\\n+```python\\n+if accelerator.distributed_type == DistributedType.MEGATRON_LM:\\n+ total_batch_size = accelerator.state.megatron_lm_plugin.global_batch_size\\n+else:\\n+ total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\\n+```\\n+\\n+3. When using Megatron-LM, the losses are already averaged across the data parallel group\\n+\\n+```python\\n+if accelerator.distributed_type == DistributedType.MEGATRON_LM:\\n+ losses.append(loss)\\n+else:\\n+ losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size)))\\n+\\n+if accelerator.distributed_type == DistributedType.MEGATRON_LM:\\n+ losses = torch.tensor(losses)\\n+else:\\n+ losses = torch.cat(losses)\\n+```\\n+\\n+4. For Megatron-LM, we need to save the model using `accelerator.save_state`\\n+\\n+```python\\n+if accelerator.distributed_type == DistributedType.MEGATRON_LM:\\n+ accelerator.save_state(args.output_dir)\\n+else:\\n+ unwrapped_model = accelerator.unwrap_model(model)\\n+ unwrapped_model.save_pretrained(\\n+ args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save\\n+ )\\n+```\\n+\\n+That\\'s it! We are good to go 🚀. 
Please find the example script in the examples folder at the path `accelerate/examples/by_feature/megatron_lm_gpt_pretraining.py`.\\n+Let\\'s run it for `gpt-large` model architecture using 4 A100-80GB GPUs.\\n+\\n+```bash\\n+accelerate launch --config_file megatron_gpt_config.yaml \\\\\\n+examples/by_feature/megatron_lm_gpt_pretraining.py \\\\\\n+--config_name \"gpt2-large\" \\\\\\n+--tokenizer_name \"gpt2-large\" \\\\\\n+--dataset_name wikitext \\\\\\n+--dataset_config_name wikitext-2-raw-v1 \\\\\\n+--block_size 1024 \\\\\\n+--learning_rate 5e-5 \\\\\\n+--per_device_train_batch_size 4 \\\\\\n+--per_device_eval_batch_size 4 \\\\\\n+--num_train_epochs 5 \\\\\\n+--with_tracking \\\\\\n+--report_to \"wandb\" \\\\\\n+--output_dir \"awesome_model\"\\n+```\\n+\\n+Below are some important excerpts from the output logs:\\n+\\n+```bash\\n+Loading extension module fused_dense_cuda...\\n+>>> done with compiling and loading fused kernels. Compilation time: 3.569 seconds\\n+ > padded vocab (size: 50257) with 175 dummy tokens (new size: 50432)\\n+Building gpt model in the pre-training mode.\\n+The Megatron LM model weights are initialized at random in `accelerator.prepare`. Please use `accelerator.load_checkpoint` to load a pre-trained checkpoint matching the distributed setup.\\n+Preparing dataloader\\n+Preparing dataloader\\n+Preparing model\\n+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 210753280\\n+ > number of parameters on (tensor, pipeline) model parallel rank (1, 1): 209445120\\n+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 210753280\\n+ > number of parameters on (tensor, pipeline) model parallel rank (0, 1): 209445120\\n+Preparing optimizer\\n+Preparing scheduler\\n+> learning rate decay style: linear\\n+10/10/2022 22:57:22 - INFO - __main__ - ***** Running training *****\\n+10/10/2022 22:57:22 - INFO - __main__ - Num examples = 2318\\n+10/10/2022 22:57:22 - INFO - __main__ - Num Epochs = 5\\n+10/10/2022 22:57:22 - INFO - __main__ - Instantaneous batch size per device = 24\\n+10/10/2022 22:57:22 - INFO - __main__ - Total train batch size (w. parallel, distributed & accumulation) = 48\\n+10/10/2022 22:57:22 - INFO - __main__ - Gradient Accumulation steps = 1\\n+10/10/2022 22:57:22 - INFO - __main__ - Total optimization steps = 245\\n+ 20%|████████████▍ | 49/245 [01:04<04:09, 1.27s/it]\\n+ 10/10/2022 22:58:29 - INFO - __main__ - epoch 0: perplexity: 1222.1594275215962 eval_loss: 7.10837459564209\\n+ 40%|████████████████████████▊ | 98/245 [02:10<03:07, 1.28s/it]\\n+ 10/10/2022 22:59:35 - INFO - __main__ - epoch 1: perplexity: 894.5236583794557 eval_loss: 6.796291351318359\\n+ 60%|████████████████████████████████████▌ | 147/245 [03:16<02:05, 1.28s/it]\\n+ 10/10/2022 23:00:40 - INFO - __main__ - epoch 2: perplexity: 702.8458788508042 eval_loss: 6.555137634277344\\n+ 80%|████████████████████████████████████████████████▊ | 196/245 [04:22<01:02, 1.28s/it]\\n+ 10/10/2022 23:01:46 - INFO - __main__ - epoch 3: perplexity: 600.3220028695281 eval_loss: 6.39746618270874\\n+100%|█████████████████████████████████████████████████████████████| 245/245 [05:27<00:00, 1.28s/it]\\n+```\\n+\\n+There are a large number of other options/features that one can set using `accelerate.utils.MegatronLMPlugin`.\\n+\\n+## Advanced features to leverage writing custom train step and Megatron-LM Indexed Datasets\\n+\\n+For leveraging more features, please go through below details.\\n+\\n+1. 
Below is an example of changes required to customize the Train Step while using Megatron-LM. \\n+You will implement the `accelerate.utils.AbstractTrainStep` or inherit from their corresponding children \\n+`accelerate.utils.GPTTrainStep`, `accelerate.utils.BertTrainStep` or `accelerate.utils.T5TrainStep`.\\n+\\n+```python\\n+from accelerate.utils import MegatronLMDummyScheduler, GPTTrainStep, avg_losses_across_data_parallel_group\\n+\\n+# Custom loss function for the Megatron model\\n+class GPTTrainStepWithCustomLoss(GPTTrainStep):\\n+ def __init__(self, megatron_args, **kwargs):\\n+ super().__init__(megatron_args)\\n+ self.kwargs = kwargs\\n+\\n+ def get_loss_func(self):\\n+ def loss_func(inputs, loss_mask, output_tensor):\\n+ batch_size, seq_length = output_tensor.shape\\n+ losses = output_tensor.float()\\n+ loss_mask = loss_mask.view(-1).float()\\n+ loss = losses.view(-1) * loss_mask\\n+\\n+ # Resize and average loss per sample\\n+ loss_per_sample = loss.view(batch_size, seq_length).sum(axis=1)\\n+ loss_mask_per_sample = loss_mask.view(batch_size, seq_length).sum(axis=1)\\n+ loss_per_sample = loss_per_sample / loss_mask_per_sample\\n+\\n+ # Calculate and scale weighting\\n+ weights = torch.stack([(inputs == kt).float() for kt in self.kwargs[\"keytoken_ids\"]]).sum(axis=[0, 2])\\n+ weights = 1.0 + self.kwargs[\"alpha\"] * weights\\n+ # Calculate weighted average\\n+ weighted_loss = (loss_per_sample * weights).mean()\\n+\\n+ # Reduce loss across data parallel groups\\n+ averaged_loss = avg_losses_across_data_parallel_group([weighted_loss])\\n+\\n+ return weighted_loss, {\"lm loss\": averaged_loss[0]}\\n+\\n+ return loss_func\\n+\\n+ def get_forward_step_func(self):\\n+ def forward_step(data_iterator, model):\\n+ \"\"\"Forward step.\"\"\"\\n+ # Get the batch.\\n+ tokens, labels, loss_mask, attention_mask, position_ids = self.get_batch(data_iterator)\\n+ output_tensor = model(tokens, position_ids, attention_mask, labels=labels)\\n+\\n+ return output_tensor, partial(self.loss_func, tokens, loss_mask)\\n+\\n+ return forward_step\\n+\\n+\\n+def main():\\n+ # Custom loss function for the Megatron model\\n+ keytoken_ids = []\\n+ keywords = [\"plt\", \"pd\", \"sk\", \"fit\", \"predict\", \" plt\", \" pd\", \" sk\", \" fit\", \" predict\"]\\n+ for keyword in keywords:\\n+ ids = tokenizer([keyword]).input_ids[0]\\n+ if len(ids) == 1:\\n+ keytoken_ids.append(ids[0])\\n+ accelerator.print(f\"Keytoken ids: {keytoken_ids}\")\\n+ accelerator.state.megatron_lm_plugin.custom_train_step_class = GPTTrainStepWithCustomLoss\\n+ accelerator.state.megatron_lm_plugin.custom_train_step_kwargs = {\\n+ \"keytoken_ids\": keytoken_ids,\\n+ \"alpha\": 0.25,\\n+ }\\n+```\\n+\\n+2. For using the Megatron-LM datasets, a few more changes are required. Dataloaders for these datasets\\n+are available only on rank 0 of each tensor parallel group. As such, there are rank where dataloader won\\'t be\\n+avaiable and this requires tweaks to the training loop. Being able to do all this shows how\\n+felixble and extensible 🤗 Accelerate is. The changes required are as follows.\\n+\\n+a. For Megatron-LM indexed datasets, we need to use `MegatronLMDummyDataLoader` \\n+and pass the required dataset args to it such as `data_path`, `seq_length` etc. \\n+See [here](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/arguments.py#L804) for the list of available args. 
\\n+ \\n+```python\\n+from accelerate.utils import MegatronLMDummyDataLoader\\n+\\n+megatron_dataloader_config = {\\n+ \"data_path\": args.data_path,\\n+ \"splits_string\": args.splits_string,\\n+ \"seq_length\": args.block_size,\\n+ \"micro_batch_size\": args.per_device_train_batch_size,\\n+}\\n+megatron_dataloader = MegatronLMDummyDataLoader(**megatron_dataloader_config)\\n+accelerator.state.megatron_lm_plugin.megatron_dataset_flag = True\\n+```\\n+\\n+b. `megatron_dataloader` is repeated 3 times to get training, validation and test dataloaders\\n+as per the `args.splits_string` proportions\\n+ \\n+```python\\n+model, optimizer, lr_scheduler, train_dataloader, eval_dataloader, _ = accelerator.prepare(\\n+ model, optimizer, lr_scheduler, megatron_dataloader, megatron_dataloader, megatron_dataloader\\n+)\\n+```\\n+\\n+c. Changes to training and evaluation loops as dataloader is only available on tensor parallel ranks 0\\n+So, we need to iterate only if the dataloader isn\\'t `None` else provide empty dict\\n+As such, we loop using `while` loop and break when `completed_steps` is equal to `args.max_train_steps`\\n+This is similar to the Megatron-LM setup wherein user has to provide `max_train_steps` when using Megaton-LM indexed datasets.\\n+This displays how flexible and extensible 🤗 Accelerate is.\\n+\\n+```python\\n+while completed_steps < args.max_train_steps:\\n+ model.train()\\n+ batch = next(train_dataloader) if train_dataloader is not None else {}\\n+ outputs = model(**batch)\\n+ loss = outputs.loss\\n+ ...\\n+\\n+ if completed_steps % eval_interval == 0:\\n+ eval_completed_steps = 0\\n+ losses = []\\n+ while eval_completed_steps < eval_iters:\\n+ model.eval()\\n+ with torch.no_grad():\\n+ batch = next(eval_dataloader) if eval_dataloader is not None else {}\\n+ outputs = model(**batch)\\n+```\\n+\\n+ \\n+## Utility for Checkpoint reshaping and interoperability\\n+\\n+1. The scripts for these are present in 🤗 Transformers library under respective models. \\n+Currently, it is available for GPT model [checkpoint_reshaping_and_interoperability.py](https://github.com/huggingface/transformers/blob/main/src/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py)\\n+\\n+2. Below is an example of conversion of checkpoint from Megatron-LM to universal 🤗 Transformers sharded checkpoint.\\n+```bash\\n+python checkpoint_reshaping_and_interoperability.py \\\\\\n+--convert_checkpoint_from_megatron_to_transformers \\\\\\n+--load_path \"gpt/iter_0005000\" \\\\\\n+--save_path \"gpt/trfs_checkpoint\" \\\\\\n+--max_shard_size \"200MB\" \\\\\\n+--tokenizer_name \"gpt2\" \\\\\\n+--print-checkpoint-structure\\n+```\\n+\\n+3. Conversion of checkpoint from transformers to megatron with `tp_size=2`, `pp_size=2` and `dp_size=2`.\\n+```bash\\n+python checkpoint_utils/megatgron_gpt2/checkpoint_reshaping_and_interoperability.py \\\\\\n+--load_path \"gpt/trfs_checkpoint\" \\\\\\n+--save_path \"gpt/megatron_lm_checkpoint\" \\\\\\n+--target_tensor_model_parallel_size 2 \\\\\\n+--target_pipeline_model_parallel_size 2 \\\\\\n+--target_data_parallel_size 2 \\\\\\n+--target_params_dtype \"bf16\" \\\\\\n+--make_vocab_size_divisible_by 128 \\\\\\n+--use_distributed_optimizer \\\\\\n+--print-checkpoint-structure\\n+```\\n+\\n+## Caveats\\n+\\n+1. Supports Transformers GPT2, Megatron-BERT and T5 models.\\n+This covers Decoder only, Encode only and Encoder-Decoder model classes.\\n+\\n+2. 
Only loss is returned from model forward pass as \\n+there is quite complex interplay of pipeline, tensor and data parallelsim behind the scenes.\\n+The `model(**batch_data)` call return loss(es) averaged across the data parallel ranks.\\n+This is fine for most cases wherein pre-training jobs are run using Megatron-LM features and\\n+you can easily compute the `perplexity` using the loss. \\n+\\n+3. The main process is the last rank as the losses are available in the last stage of pipeline.\\n+`accelerator.is_main_process` and `accelerator.is_local_main_process` return `True` for last rank when using \\n+Megatron-LM integration.\\n+\\n+4. In `accelerator.prepare` call, a Megatron-LM model corresponding to a given Transformers model is created\\n+with random weights. Please use `accelerator.load_state` to load the Megatron-LM checkpoint with matching TP, PP and DP partitions.\\n+\\n+5. Currently, checkpoint reshaping and interoperability support is only available for GPT. \\n+Soon it will be extended to BERT and T5.\\n+\\n+6. `gradient_accumulation_steps` needs to be 1. When using Megatron-LM, micro batches in pipeline parallelism \\n+setting is synonymous with gradient accumulation. \\n+\\n+7. When using Megatron-LM, use `accelerator.save_state` and `accelerator.load_state` for saving and loading checkpoints.\\n+\\n+8. Below are the mapping from Megatron-LM model architectures to the the equivalent 🤗 transformers model architectures.\\n+Only these 🤗 transformers model architectures are supported.\\n+\\n+a. Megatron-LM [BertModel](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/bert_model.py) : \\n+🤗 transformers models with `megatron-bert` in config\\'s model type, e.g., \\n+[MegatronBERT](https://huggingface.co/docs/transformers/model_doc/megatron-bert)\\n+ \\n+b. Megatron-LM [GPTModel](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py) : \\n+🤗 transformers models with `gpt2` in config\\'s model type, e.g., \\n+[OpenAI GPT2](https://huggingface.co/docs/transformers/model_doc/gpt2)\\n+ \\n+c. Megatron-LM [T5Model](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/t5_model.py) : \\n+🤗 transformers models with `t5` in config\\'s model type, e.g., \\n+[T5](https://huggingface.co/docs/transformers/model_doc/t5) and \\n+[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)\\n\\\\ No newline at end of file\\ndiff --git a/examples/by_feature/megatron_lm_gpt_pretraining.py b/examples/by_feature/megatron_lm_gpt_pretraining.py\\nnew file mode 100644\\nindex 000000000..68a425e4d\\n--- /dev/null\\n+++ b/examples/by_feature/megatron_lm_gpt_pretraining.py\\n@@ -0,0 +1,699 @@\\n+#!/usr/bin/env python\\n+# coding=utf-8\\n+# Copyright 2021 The HuggingFace Inc. team. 
All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\"\"\"\\n+Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...)\\n+on a text file or a dataset without using HuggingFace Trainer.\\n+\\n+Here is the full list of checkpoints on the hub that can be fine-tuned by this script:\\n+https://huggingface.co/models?filter=text-generation\\n+\"\"\"\\n+# You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments.\\n+\\n+import argparse\\n+import json\\n+import logging\\n+import math\\n+import os\\n+import random\\n+from itertools import chain\\n+from pathlib import Path\\n+\\n+import torch\\n+from torch.utils.data import DataLoader\\n+\\n+import datasets\\n+import transformers\\n+from accelerate import Accelerator, DistributedType\\n+from accelerate.logging import get_logger\\n+from accelerate.utils import MegatronLMDummyScheduler, set_seed\\n+from datasets import load_dataset\\n+from huggingface_hub import Repository\\n+from tqdm.auto import tqdm\\n+from transformers import (\\n+ CONFIG_MAPPING,\\n+ MODEL_MAPPING,\\n+ AutoConfig,\\n+ AutoModelForCausalLM,\\n+ AutoTokenizer,\\n+ SchedulerType,\\n+ default_data_collator,\\n+ get_scheduler,\\n+)\\n+from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry\\n+from transformers.utils.versions import require_version\\n+\\n+\\n+# Will error if the minimal version of Transformers is not installed. 
Remove at your own risks.\\n+check_min_version(\"4.23.0.dev0\")\\n+\\n+logger = get_logger(__name__)\\n+\\n+require_version(\"datasets>=1.8.0\", \"To fix: pip install -r examples/pytorch/language-modeling/requirements.txt\")\\n+\\n+MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())\\n+MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\\n+\\n+\\n+def parse_args():\\n+ parser = argparse.ArgumentParser(description=\"Finetune a transformers model on a causal language modeling task\")\\n+ parser.add_argument(\\n+ \"--dataset_name\",\\n+ type=str,\\n+ default=None,\\n+ help=\"The name of the dataset to use (via the datasets library).\",\\n+ )\\n+ parser.add_argument(\\n+ \"--dataset_config_name\",\\n+ type=str,\\n+ default=None,\\n+ help=\"The configuration name of the dataset to use (via the datasets library).\",\\n+ )\\n+ parser.add_argument(\\n+ \"--train_file\", type=str, default=None, help=\"A csv or a json file containing the training data.\"\\n+ )\\n+ parser.add_argument(\\n+ \"--validation_file\", type=str, default=None, help=\"A csv or a json file containing the validation data.\"\\n+ )\\n+ parser.add_argument(\\n+ \"--validation_split_percentage\",\\n+ default=5,\\n+ help=\"The percentage of the train set used as validation set in case there\\'s no validation split\",\\n+ )\\n+ parser.add_argument(\\n+ \"--model_name_or_path\",\\n+ type=str,\\n+ help=\"Path to pretrained model or model identifier from huggingface.co/models.\",\\n+ required=False,\\n+ )\\n+ parser.add_argument(\\n+ \"--config_name\",\\n+ type=str,\\n+ default=None,\\n+ help=\"Pretrained config name or path if not the same as model_name\",\\n+ )\\n+ parser.add_argument(\\n+ \"--tokenizer_name\",\\n+ type=str,\\n+ default=None,\\n+ help=\"Pretrained tokenizer name or path if not the same as model_name\",\\n+ )\\n+ parser.add_argument(\\n+ \"--use_slow_tokenizer\",\\n+ action=\"store_true\",\\n+ help=\"If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).\",\\n+ )\\n+ parser.add_argument(\\n+ \"--per_device_train_batch_size\",\\n+ type=int,\\n+ default=8,\\n+ help=\"Batch size (per device) for the training dataloader.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--per_device_eval_batch_size\",\\n+ type=int,\\n+ default=8,\\n+ help=\"Batch size (per device) for the evaluation dataloader.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--learning_rate\",\\n+ type=float,\\n+ default=5e-5,\\n+ help=\"Initial learning rate (after the potential warmup period) to use.\",\\n+ )\\n+ parser.add_argument(\"--weight_decay\", type=float, default=0.0, help=\"Weight decay to use.\")\\n+ parser.add_argument(\"--num_train_epochs\", type=int, default=3, help=\"Total number of training epochs to perform.\")\\n+ parser.add_argument(\\n+ \"--max_train_steps\",\\n+ type=int,\\n+ default=None,\\n+ help=\"Total number of training steps to perform. 
If provided, overrides num_train_epochs.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--gradient_accumulation_steps\",\\n+ type=int,\\n+ default=1,\\n+ help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--lr_scheduler_type\",\\n+ type=SchedulerType,\\n+ default=\"linear\",\\n+ help=\"The scheduler type to use.\",\\n+ choices=[\"linear\", \"cosine\", \"cosine_with_restarts\", \"polynomial\", \"constant\", \"constant_with_warmup\"],\\n+ )\\n+ parser.add_argument(\\n+ \"--num_warmup_steps\", type=int, default=0, help=\"Number of steps for the warmup in the lr scheduler.\"\\n+ )\\n+ parser.add_argument(\"--output_dir\", type=str, default=None, help=\"Where to store the final model.\")\\n+ parser.add_argument(\"--seed\", type=int, default=None, help=\"A seed for reproducible training.\")\\n+ parser.add_argument(\\n+ \"--model_type\",\\n+ type=str,\\n+ default=None,\\n+ help=\"Model type to use if training from scratch.\",\\n+ choices=MODEL_TYPES,\\n+ )\\n+ parser.add_argument(\\n+ \"--block_size\",\\n+ type=int,\\n+ default=None,\\n+ help=(\\n+ \"Optional input sequence length after tokenization. The training dataset will be truncated in block of\"\\n+ \" this size for training. Default to the model max input length for single sentence inputs (take into\"\\n+ \" account special tokens).\"\\n+ ),\\n+ )\\n+ parser.add_argument(\\n+ \"--preprocessing_num_workers\",\\n+ type=int,\\n+ default=None,\\n+ help=\"The number of processes to use for the preprocessing.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--overwrite_cache\", action=\"store_true\", help=\"Overwrite the cached training and evaluation sets\"\\n+ )\\n+ parser.add_argument(\\n+ \"--no_keep_linebreaks\", action=\"store_true\", help=\"Do not keep line breaks when using TXT files.\"\\n+ )\\n+ parser.add_argument(\"--push_to_hub\", action=\"store_true\", help=\"Whether or not to push the model to the Hub.\")\\n+ parser.add_argument(\\n+ \"--hub_model_id\", type=str, help=\"The name of the repository to keep in sync with the local `output_dir`.\"\\n+ )\\n+ parser.add_argument(\"--hub_token\", type=str, help=\"The token to use to push to the Model Hub.\")\\n+ parser.add_argument(\\n+ \"--checkpointing_steps\",\\n+ type=str,\\n+ default=None,\\n+ help=\"Whether the various states should be saved at the end of every n steps, or \\'epoch\\' for each epoch.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--resume_from_checkpoint\",\\n+ type=str,\\n+ default=None,\\n+ help=\"If the training should continue from a checkpoint folder.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--with_tracking\",\\n+ action=\"store_true\",\\n+ help=\"Whether to enable experiment trackers for logging.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--report_to\",\\n+ type=str,\\n+ default=\"all\",\\n+ help=(\\n+ \\'The integration to report the results and logs to. Supported platforms are `\"tensorboard\"`,\\'\\n+ \\' `\"wandb\"` and `\"comet_ml\"`. 
Use `\"all\"` (default) to report to all integrations.\\'\\n+ \"Only applicable when `--with_tracking` is passed.\"\\n+ ),\\n+ )\\n+ args = parser.parse_args()\\n+\\n+ # Sanity checks\\n+ if args.dataset_name is None and args.train_file is None and args.validation_file is None:\\n+ raise ValueError(\"Need either a dataset name or a training/validation file.\")\\n+ else:\\n+ if args.train_file is not None:\\n+ extension = args.train_file.split(\".\")[-1]\\n+ assert extension in [\"csv\", \"json\", \"txt\"], \"`train_file` should be a csv, json or txt file.\"\\n+ if args.validation_file is not None:\\n+ extension = args.validation_file.split(\".\")[-1]\\n+ assert extension in [\"csv\", \"json\", \"txt\"], \"`validation_file` should be a csv, json or txt file.\"\\n+\\n+ if args.push_to_hub:\\n+ assert args.output_dir is not None, \"Need an `output_dir` to create a repo when `--push_to_hub` is passed.\"\\n+\\n+ return args\\n+\\n+\\n+def main():\\n+ args = parse_args()\\n+\\n+ # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The\\n+ # information sent is the one passed as arguments along with your Python/PyTorch versions.\\n+ send_example_telemetry(\"run_clm_no_trainer\", args)\\n+\\n+ # Initialize the accelerator. We will let the accelerator handle device placement for us in this example.\\n+ # If we\\'re using tracking, we also need to initialize it here and it will by default pick up all supported trackers\\n+ # in the environment\\n+ accelerator_log_kwargs = {}\\n+\\n+ if args.with_tracking:\\n+ accelerator_log_kwargs[\"log_with\"] = args.report_to\\n+ accelerator_log_kwargs[\"logging_dir\"] = args.output_dir\\n+\\n+ accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs)\\n+\\n+ # Make one log on every process with the configuration for debugging.\\n+ logging.basicConfig(\\n+ format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\\n+ datefmt=\"%m/%d/%Y %H:%M:%S\",\\n+ level=logging.INFO,\\n+ )\\n+ logger.info(accelerator.state, main_process_only=False)\\n+ if accelerator.is_local_main_process:\\n+ datasets.utils.logging.set_verbosity_warning()\\n+ transformers.utils.logging.set_verbosity_info()\\n+ else:\\n+ datasets.utils.logging.set_verbosity_error()\\n+ transformers.utils.logging.set_verbosity_error()\\n+\\n+ # If passed along, set the training seed now.\\n+ if args.seed is not None:\\n+ set_seed(args.seed)\\n+\\n+ # Handle the repository creation\\n+ if accelerator.is_main_process:\\n+ if args.push_to_hub:\\n+ if args.hub_model_id is None:\\n+ repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)\\n+ else:\\n+ repo_name = args.hub_model_id\\n+ repo = Repository(args.output_dir, clone_from=repo_name)\\n+\\n+ with open(os.path.join(args.output_dir, \".gitignore\"), \"w+\") as gitignore:\\n+ if \"step_*\" not in gitignore:\\n+ gitignore.write(\"step_*\\\\n\")\\n+ if \"epoch_*\" not in gitignore:\\n+ gitignore.write(\"epoch_*\\\\n\")\\n+ elif args.output_dir is not None:\\n+ os.makedirs(args.output_dir, exist_ok=True)\\n+ accelerator.wait_for_everyone()\\n+\\n+ # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)\\n+ # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/\\n+ # (the dataset will be downloaded automatically from the datasets Hub).\\n+ #\\n+ # For CSV/JSON files, this script will use the column called \\'text\\' or 
the first column if no column called\\n+ # \\'text\\' is found. You can easily tweak this behavior (see below).\\n+ #\\n+ # In distributed training, the load_dataset function guarantee that only one local process can concurrently\\n+ # download the dataset.\\n+ if args.dataset_name is not None:\\n+ # Downloading and loading a dataset from the hub.\\n+ raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)\\n+ if \"validation\" not in raw_datasets.keys():\\n+ raw_datasets[\"validation\"] = load_dataset(\\n+ args.dataset_name,\\n+ args.dataset_config_name,\\n+ split=f\"train[:{args.validation_split_percentage}%]\",\\n+ )\\n+ raw_datasets[\"train\"] = load_dataset(\\n+ args.dataset_name,\\n+ args.dataset_config_name,\\n+ split=f\"train[{args.validation_split_percentage}%:]\",\\n+ )\\n+ else:\\n+ data_files = {}\\n+ dataset_args = {}\\n+ if args.train_file is not None:\\n+ data_files[\"train\"] = args.train_file\\n+ if args.validation_file is not None:\\n+ data_files[\"validation\"] = args.validation_file\\n+ extension = args.train_file.split(\".\")[-1]\\n+ if extension == \"txt\":\\n+ extension = \"text\"\\n+ dataset_args[\"keep_linebreaks\"] = not args.no_keep_linebreaks\\n+ raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args)\\n+ # If no validation data is there, validation_split_percentage will be used to divide the dataset.\\n+ if \"validation\" not in raw_datasets.keys():\\n+ raw_datasets[\"validation\"] = load_dataset(\\n+ extension,\\n+ data_files=data_files,\\n+ split=f\"train[:{args.validation_split_percentage}%]\",\\n+ **dataset_args,\\n+ )\\n+ raw_datasets[\"train\"] = load_dataset(\\n+ extension,\\n+ data_files=data_files,\\n+ split=f\"train[{args.validation_split_percentage}%:]\",\\n+ **dataset_args,\\n+ )\\n+\\n+ # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at\\n+ # https://huggingface.co/docs/datasets/loading_datasets.html.\\n+\\n+ # Load pretrained model and tokenizer\\n+ #\\n+ # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently\\n+ # download model & vocab.\\n+ if args.config_name:\\n+ config = AutoConfig.from_pretrained(args.config_name)\\n+ elif args.model_name_or_path:\\n+ config = AutoConfig.from_pretrained(args.model_name_or_path)\\n+ else:\\n+ config = CONFIG_MAPPING[args.model_type]()\\n+ logger.warning(\"You are instantiating a new config instance from scratch.\")\\n+\\n+ if args.tokenizer_name:\\n+ tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer)\\n+ elif args.model_name_or_path:\\n+ tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)\\n+ else:\\n+ raise ValueError(\\n+ \"You are instantiating a new tokenizer from scratch. 
This is not supported by this script.\"\\n+ \"You can do it from another script, save it, and load it from here, using --tokenizer_name.\"\\n+ )\\n+\\n+ if args.model_name_or_path:\\n+ model = AutoModelForCausalLM.from_pretrained(\\n+ args.model_name_or_path,\\n+ from_tf=bool(\".ckpt\" in args.model_name_or_path),\\n+ config=config,\\n+ )\\n+ else:\\n+ logger.info(\"Training new model from scratch\")\\n+ model = AutoModelForCausalLM.from_config(config)\\n+\\n+ model.resize_token_embeddings(len(tokenizer))\\n+\\n+ # Preprocessing the datasets.\\n+ # First we tokenize all the texts.\\n+ column_names = raw_datasets[\"train\"].column_names\\n+ text_column_name = \"text\" if \"text\" in column_names else column_names[0]\\n+\\n+ def tokenize_function(examples):\\n+ return tokenizer(examples[text_column_name])\\n+\\n+ with accelerator.main_process_first():\\n+ tokenized_datasets = raw_datasets.map(\\n+ tokenize_function,\\n+ batched=True,\\n+ num_proc=args.preprocessing_num_workers,\\n+ remove_columns=column_names,\\n+ load_from_cache_file=not args.overwrite_cache,\\n+ desc=\"Running tokenizer on dataset\",\\n+ )\\n+\\n+ if args.block_size is None:\\n+ block_size = tokenizer.model_max_length\\n+ if block_size > 1024:\\n+ logger.warning(\\n+ f\"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). \"\\n+ \"Picking 1024 instead. You can change that default value by passing --block_size xxx.\"\\n+ )\\n+ block_size = 1024\\n+ else:\\n+ if args.block_size > tokenizer.model_max_length:\\n+ logger.warning(\\n+ f\"The block_size passed ({args.block_size}) is larger than the maximum length for the model\"\\n+ f\"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}.\"\\n+ )\\n+ block_size = min(args.block_size, tokenizer.model_max_length)\\n+\\n+ # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.\\n+ def group_texts(examples):\\n+ # Concatenate all texts.\\n+ concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}\\n+ total_length = len(concatenated_examples[list(examples.keys())[0]])\\n+ # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can\\n+ # customize this part to your needs.\\n+ if total_length >= block_size:\\n+ total_length = (total_length // block_size) * block_size\\n+ # Split by chunks of max_len.\\n+ result = {\\n+ k: [t[i : i + block_size] for i in range(0, total_length, block_size)]\\n+ for k, t in concatenated_examples.items()\\n+ }\\n+ result[\"labels\"] = result[\"input_ids\"].copy()\\n+ return result\\n+\\n+ # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder\\n+ # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower\\n+ # to preprocess.\\n+ #\\n+ # To speed up this part, we use multiprocessing. 
See the documentation of the map method for more information:\\n+ # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map\\n+\\n+ with accelerator.main_process_first():\\n+ lm_datasets = tokenized_datasets.map(\\n+ group_texts,\\n+ batched=True,\\n+ num_proc=args.preprocessing_num_workers,\\n+ load_from_cache_file=not args.overwrite_cache,\\n+ desc=f\"Grouping texts in chunks of {block_size}\",\\n+ )\\n+\\n+ train_dataset = lm_datasets[\"train\"]\\n+ eval_dataset = lm_datasets[\"validation\"]\\n+\\n+ # Log a few random samples from the training set:\\n+ for index in random.sample(range(len(train_dataset)), 3):\\n+ logger.info(f\"Sample {index} of the training set: {train_dataset[index]}.\")\\n+\\n+ # DataLoaders creation:\\n+ train_dataloader = DataLoader(\\n+ train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size\\n+ )\\n+ eval_dataloader = DataLoader(\\n+ eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size\\n+ )\\n+\\n+ # Optimizer\\n+ # Split weights in two groups, one with weight decay and the other not.\\n+ no_decay = [\"bias\", \"layer_norm.weight\"]\\n+ optimizer_grouped_parameters = [\\n+ {\\n+ \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\\n+ \"weight_decay\": args.weight_decay,\\n+ },\\n+ {\\n+ \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\\n+ \"weight_decay\": 0.0,\\n+ },\\n+ ]\\n+ optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate)\\n+\\n+ # Scheduler and math around the number of training steps.\\n+ overrode_max_train_steps = False\\n+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\\n+ if args.max_train_steps is None:\\n+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\\n+ overrode_max_train_steps = True\\n+\\n+ # New Code\\n+ # For Megatron-LM, we need to use `MegatronLMDummyScheduler` instead of regular schedulers\\n+ if accelerator.distributed_type == DistributedType.MEGATRON_LM:\\n+ lr_scheduler = MegatronLMDummyScheduler(\\n+ optimizer=optimizer,\\n+ total_num_steps=args.max_train_steps,\\n+ warmup_num_steps=args.num_warmup_steps,\\n+ )\\n+ else:\\n+ lr_scheduler = get_scheduler(\\n+ name=args.lr_scheduler_type,\\n+ optimizer=optimizer,\\n+ num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps,\\n+ num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,\\n+ )\\n+\\n+ # Prepare everything with our `accelerator`.\\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\\n+ )\\n+\\n+ # On TPU, the tie weights in our model have been disconnected, so we need to restore the ties.\\n+ if accelerator.distributed_type == DistributedType.TPU:\\n+ model.tie_weights()\\n+\\n+ # We need to recalculate our total training steps as the size of the training dataloader may have changed.\\n+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\\n+ if overrode_max_train_steps:\\n+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\\n+ # Afterwards we recalculate our number of training epochs\\n+ args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\\n+\\n+ # Figure out how many steps we should save the Accelerator states\\n+ 
checkpointing_steps = args.checkpointing_steps\\n+ if checkpointing_steps is not None and checkpointing_steps.isdigit():\\n+ checkpointing_steps = int(checkpointing_steps)\\n+\\n+ # We need to initialize the trackers we use, and also store our configuration.\\n+ # The trackers initializes automatically on the main process.\\n+ if args.with_tracking:\\n+ experiment_config = vars(args)\\n+ # TensorBoard cannot log Enums, need the raw value\\n+ experiment_config[\"lr_scheduler_type\"] = experiment_config[\"lr_scheduler_type\"].value\\n+ accelerator.init_trackers(\"clm_no_trainer\", experiment_config)\\n+\\n+ # Train!\\n+ # New Code\\n+ # For Megatron-LM, we need to get `global_batch_size` from megatron_lm_plugin\\n+ # as it handles the specifics related to data parallelism, tensor model parallelism and pipeline parallelism\\n+ if accelerator.distributed_type == DistributedType.MEGATRON_LM:\\n+ total_batch_size = accelerator.state.megatron_lm_plugin.global_batch_size\\n+ else:\\n+ total_batch_size = (\\n+ args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\\n+ )\\n+\\n+ logger.info(\"***** Running training *****\")\\n+ logger.info(f\" Num examples = {len(train_dataset)}\")\\n+ logger.info(f\" Num Epochs = {args.num_train_epochs}\")\\n+ logger.info(f\" Instantaneous batch size per device = {args.per_device_train_batch_size}\")\\n+ logger.info(f\" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}\")\\n+ logger.info(f\" Gradient Accumulation steps = {args.gradient_accumulation_steps}\")\\n+ logger.info(f\" Total optimization steps = {args.max_train_steps}\")\\n+ # Only show the progress bar once on each machine.\\n+ progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)\\n+ completed_steps = 0\\n+ starting_epoch = 0\\n+\\n+ # Potentially load in the weights and states from a previous save\\n+ if args.resume_from_checkpoint:\\n+ if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != \"\":\\n+ accelerator.print(f\"Resumed from checkpoint: {args.resume_from_checkpoint}\")\\n+ accelerator.load_state(args.resume_from_checkpoint)\\n+ path = os.path.basename(args.resume_from_checkpoint)\\n+ else:\\n+ # Get the most recent checkpoint\\n+ dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]\\n+ dirs.sort(key=os.path.getctime)\\n+ path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last\\n+ # Extract `epoch_{i}` or `step_{i}`\\n+ training_difference = os.path.splitext(path)[0]\\n+\\n+ if \"epoch\" in training_difference:\\n+ starting_epoch = int(training_difference.replace(\"epoch_\", \"\")) + 1\\n+ resume_step = None\\n+ else:\\n+ # need to multiply `gradient_accumulation_steps` to reflect real steps\\n+ resume_step = int(training_difference.replace(\"step_\", \"\")) * args.gradient_accumulation_steps\\n+ starting_epoch = resume_step // len(train_dataloader)\\n+ resume_step -= starting_epoch * len(train_dataloader)\\n+\\n+ # update the progress_bar if load from checkpoint\\n+ progress_bar.update(starting_epoch * num_update_steps_per_epoch)\\n+ completed_steps = starting_epoch * num_update_steps_per_epoch\\n+\\n+ for epoch in range(starting_epoch, args.num_train_epochs):\\n+ model.train()\\n+ if args.with_tracking:\\n+ total_loss = 0\\n+ for step, batch in enumerate(train_dataloader):\\n+ # We need to skip steps until we reach the resumed step\\n+ if args.resume_from_checkpoint and epoch == starting_epoch:\\n+ if resume_step is 
not None and step < resume_step:\\n+ if step % args.gradient_accumulation_steps == 0:\\n+ progress_bar.update(1)\\n+ completed_steps += 1\\n+ continue\\n+\\n+ with accelerator.accumulate(model):\\n+ outputs = model(**batch)\\n+ loss = outputs.loss\\n+ # We keep track of the loss at each epoch\\n+ if args.with_tracking:\\n+ total_loss += loss.detach().float()\\n+ accelerator.backward(loss)\\n+ optimizer.step()\\n+ lr_scheduler.step()\\n+ optimizer.zero_grad()\\n+\\n+ # Checks if the accelerator has performed an optimization step behind the scenes\\n+ if accelerator.sync_gradients:\\n+ progress_bar.update(1)\\n+ completed_steps += 1\\n+\\n+ if isinstance(checkpointing_steps, int):\\n+ if completed_steps % checkpointing_steps == 0:\\n+ output_dir = f\"step_{completed_steps }\"\\n+ if args.output_dir is not None:\\n+ output_dir = os.path.join(args.output_dir, output_dir)\\n+ accelerator.save_state(output_dir)\\n+ if completed_steps >= args.max_train_steps:\\n+ break\\n+\\n+ model.eval()\\n+ losses = []\\n+ for step, batch in enumerate(eval_dataloader):\\n+ with torch.no_grad():\\n+ outputs = model(**batch)\\n+\\n+ loss = outputs.loss\\n+ # New Code\\n+ # For Megatron-LM, the losses are already averaged across the data parallel group\\n+ if accelerator.distributed_type == DistributedType.MEGATRON_LM:\\n+ losses.append(loss)\\n+ else:\\n+ losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size)))\\n+ try:\\n+ if accelerator.distributed_type == DistributedType.MEGATRON_LM:\\n+ losses = torch.tensor(losses)\\n+ else:\\n+ losses = torch.cat(losses)\\n+ eval_loss = torch.mean(losses)\\n+ perplexity = math.exp(eval_loss)\\n+ except OverflowError:\\n+ perplexity = float(\"inf\")\\n+\\n+ logger.info(f\"epoch {epoch}: perplexity: {perplexity} eval_loss: {eval_loss}\")\\n+\\n+ if args.with_tracking:\\n+ accelerator.log(\\n+ {\\n+ \"perplexity\": perplexity,\\n+ \"eval_loss\": eval_loss,\\n+ \"train_loss\": total_loss.item() / len(train_dataloader),\\n+ \"epoch\": epoch,\\n+ \"step\": completed_steps,\\n+ },\\n+ step=completed_steps,\\n+ )\\n+\\n+ if args.push_to_hub and epoch < args.num_train_epochs - 1:\\n+ accelerator.wait_for_everyone()\\n+ unwrapped_model = accelerator.unwrap_model(model)\\n+ unwrapped_model.save_pretrained(\\n+ args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save\\n+ )\\n+ if accelerator.is_main_process:\\n+ tokenizer.save_pretrained(args.output_dir)\\n+ repo.push_to_hub(\\n+ commit_message=f\"Training in progress epoch {epoch}\", blocking=False, auto_lfs_prune=True\\n+ )\\n+\\n+ if args.checkpointing_steps == \"epoch\":\\n+ output_dir = f\"epoch_{epoch}\"\\n+ if args.output_dir is not None:\\n+ output_dir = os.path.join(args.output_dir, output_dir)\\n+ accelerator.save_state(output_dir)\\n+\\n+ if args.with_tracking:\\n+ accelerator.end_training()\\n+\\n+ if args.output_dir is not None:\\n+ accelerator.wait_for_everyone()\\n+ # New Code\\n+ # For Megatron-LM, we need to save the model using `accelerator.save_state`\\n+ if accelerator.distributed_type == DistributedType.MEGATRON_LM:\\n+ accelerator.save_state(args.output_dir)\\n+ else:\\n+ unwrapped_model = accelerator.unwrap_model(model)\\n+ unwrapped_model.save_pretrained(\\n+ args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save\\n+ )\\n+ if accelerator.is_main_process:\\n+ tokenizer.save_pretrained(args.output_dir)\\n+ if args.push_to_hub:\\n+ repo.push_to_hub(commit_message=\"End of training\", 
auto_lfs_prune=True)\\n+\\n+ with open(os.path.join(args.output_dir, \"all_results.json\"), \"w\") as f:\\n+ json.dump({\"perplexity\": perplexity}, f)\\n+\\n+\\n+if __name__ == \"__main__\":\\n+ main()\\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex 2b6558944..e40797b39 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -41,6 +41,7 @@\\n InitProcessGroupKwargs,\\n KwargsHandler,\\n LoggerType,\\n+ MegatronLMPlugin,\\n PrecisionType,\\n RNGType,\\n compare_versions,\\n@@ -50,6 +51,7 @@\\n get_pretty_name,\\n is_bf16_available,\\n is_deepspeed_available,\\n+ is_megatron_lm_available,\\n is_torch_version,\\n is_tpu_available,\\n pad_across_processes,\\n@@ -71,6 +73,21 @@\\n DummyScheduler,\\n )\\n \\n+if is_megatron_lm_available():\\n+ from .utils import (\\n+ MegatronEngine,\\n+ MegatronLMDummyDataLoader,\\n+ MegatronLMDummyScheduler,\\n+ MegatronLMOptimizerWrapper,\\n+ MegatronLMSchedulerWrapper,\\n+ megatron_lm_initialize,\\n+ megatron_lm_prepare_data_loader,\\n+ megatron_lm_prepare_model,\\n+ megatron_lm_prepare_optimizer,\\n+ megatron_lm_prepare_scheduler,\\n+ )\\n+\\n+\\n if is_tpu_available(check_device=False):\\n import torch_xla.distributed.xla_multiprocessing as xmp\\n \\n@@ -107,6 +124,9 @@ class Accelerator:\\n fsdp_plugin (`FullyShardedDataParallelPlugin`, *optional*):\\n Tweak your FSDP related args using this argument. This argument is optional and can be configured directly\\n using *accelerate config*\\n+ megatron_lm_plugin (`MegatronLMPlugin`, *optional*):\\n+ Tweak your MegatronLM related args using this argument. This argument is optional and can be configured\\n+ directly using *accelerate config*\\n rng_types (list of `str` or [`~utils.RNGType`]):\\n The list of random number generators to synchronize at the beginning of each iteration in your prepared\\n dataloaders. Should be one or several of:\\n@@ -166,6 +186,7 @@ def __init__(\\n cpu: bool = False,\\n deepspeed_plugin: DeepSpeedPlugin = None,\\n fsdp_plugin: FullyShardedDataParallelPlugin = None,\\n+ megatron_lm_plugin: MegatronLMPlugin = None,\\n rng_types: Optional[List[Union[str, RNGType]]] = None,\\n log_with: Optional[List[Union[str, LoggerType, GeneralTracker]]] = None,\\n logging_dir: Optional[Union[str, os.PathLike]] = None,\\n@@ -218,6 +239,17 @@ def __init__(\\n raise TypeError(\"`fsdp_plugin` must be a FullyShardedDataParallelPlugin object.\")\\n os.environ[\"USE_FSDP\"] = \"true\" # use FSDP if plugin is provided\\n \\n+ if megatron_lm_plugin is None: # init from env variables\\n+ megatron_lm_plugin = MegatronLMPlugin() if os.environ.get(\"USE_MEGATRON_LM\", \"false\") == \"true\" else None\\n+ else:\\n+ if not isinstance(megatron_lm_plugin, MegatronLMPlugin):\\n+ raise TypeError(\"`megatron_lm_plugin` must be a MegatronLMPlugin object.\")\\n+ os.environ[\"USE_MEGATRON_LM\"] = \"true\" # use MegatronLM if plugin is provided\\n+\\n+ if megatron_lm_plugin:\\n+ if not is_megatron_lm_available():\\n+ raise ImportError(\"Megatron is not installed. 
please build it from source.\")\\n+\\n # Kwargs handlers\\n self.ddp_handler = None\\n self.scaler_handler = None\\n@@ -247,6 +279,7 @@ def __init__(\\n cpu=cpu,\\n deepspeed_plugin=deepspeed_plugin,\\n fsdp_plugin=fsdp_plugin,\\n+ megatron_lm_plugin=megatron_lm_plugin,\\n _from_accelerator=True,\\n **kwargs,\\n )\\n@@ -278,7 +311,7 @@ def __init__(\\n self.scaler = None\\n self.native_amp = False\\n err = \"{mode} mixed precision requires {requirement}\"\\n- if self.state.mixed_precision == \"fp16\":\\n+ if self.state.mixed_precision == \"fp16\" and self.distributed_type != DistributedType.MEGATRON_LM:\\n self.native_amp = True\\n if not torch.cuda.is_available() and not parse_flag_from_env(\"USE_MPS_DEVICE\"):\\n raise ValueError(err.format(mode=\"fp16\", requirement=\"a GPU\"))\\n@@ -289,7 +322,11 @@ def __init__(\\n self.scaler = ShardedGradScaler(**kwargs)\\n else:\\n self.scaler = torch.cuda.amp.GradScaler(**kwargs)\\n- elif self.state.mixed_precision == \"bf16\" and self.distributed_type != DistributedType.FSDP:\\n+ elif (\\n+ self.state.mixed_precision == \"bf16\"\\n+ and self.distributed_type != DistributedType.FSDP\\n+ and self.distributed_type != DistributedType.MEGATRON_LM\\n+ ):\\n self.native_amp = is_bf16_available(True)\\n if mixed_precision == \"bf16\" and not self.native_amp and not is_tpu_available():\\n raise ValueError(err.format(mode=\"bf16\", requirement=\"PyTorch >= 1.10 and a supported device.\"))\\n@@ -344,17 +381,27 @@ def device(self):\\n @property\\n def is_main_process(self):\\n \"\"\"True for one process only.\"\"\"\\n- return self.process_index == 0\\n+ return (\\n+ self.process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process\\n+ )\\n \\n @property\\n def is_local_main_process(self):\\n \"\"\"True for one process per server.\"\"\"\\n- return self.local_process_index == 0\\n+ return (\\n+ self.local_process_index == 0\\n+ if self.distributed_type != DistributedType.MEGATRON_LM\\n+ else self.is_last_process\\n+ )\\n \\n @property\\n def use_fp16(self):\\n return self.mixed_precision != \"no\"\\n \\n+ @property\\n+ def is_last_process(self):\\n+ return self.process_index == self.num_processes - 1\\n+\\n @property\\n def mixed_precision(self):\\n if self.distributed_type == DistributedType.DEEPSPEED:\\n@@ -393,6 +440,18 @@ def wrapper(self, *args, **kwargs):\\n \\n return wrapper\\n \\n+ def on_last_process(func):\\n+ \"\"\"\\n+ A decorator that will run the decorated function on the last process only.\\n+ \"\"\"\\n+\\n+ @wraps(func)\\n+ def wrapper(self, *args, **kwargs):\\n+ if self.is_last_process or not self.use_distributed:\\n+ return func(self, *args, **kwargs)\\n+\\n+ return wrapper\\n+\\n def on_process(process_idx):\\n \"\"\"\\n A decorator that will run the decorated function on a given process index only.\\n@@ -628,8 +687,8 @@ def prepare(self, *args, device_placement=None):\\n \"\"\"\\n if device_placement is None:\\n device_placement = [None for _ in args]\\n- elif self.distributed_type == DistributedType.DEEPSPEED:\\n- raise ValueError(\"You can\\'t customize device placements with DeepSpeed.\")\\n+ elif self.distributed_type in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM):\\n+ raise ValueError(\"You can\\'t customize device placements with DeepSpeed or Megatron-LM.\")\\n elif len(device_placement) != len(args):\\n raise ValueError(\\n f\"`device_placement` should be a list with {len(args)} elements (the number of objects passed).\"\\n@@ -677,6 +736,8 @@ def prepare(self, *args, 
device_placement=None):\\n \\n if self.distributed_type == DistributedType.DEEPSPEED:\\n result = self._prepare_deepspeed(*args)\\n+ elif self.distributed_type == DistributedType.MEGATRON_LM:\\n+ result = self._prepare_megatron_lm(*args)\\n else:\\n result = tuple(\\n self._prepare_one(obj, first_pass=True, device_placement=d) for obj, d in zip(args, device_placement)\\n@@ -923,6 +984,107 @@ def _prepare_deepspeed(self, *args):\\n )\\n return tuple(result)\\n \\n+ def _prepare_megatron_lm(self, *args):\\n+ megatron_lm_plugin = self.state.megatron_lm_plugin\\n+ if not megatron_lm_plugin.megatron_dataset_flag:\\n+ batch_sizes = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\\n+ if len(batch_sizes) == 0:\\n+ raise ValueError(\\n+ \"You must specify a training or evaluation dataloader in `accelerate.prepare()` when using Megatron-LM.\"\\n+ )\\n+\\n+ micro_batch_size = min(batch_sizes) if megatron_lm_plugin.is_train_batch_min else max(batch_sizes)\\n+ if len(batch_sizes) > 1:\\n+ logger.info(\\n+ \"Since you passed both train and evaluation dataloader, `is_train_batch_min` (here \"\\n+ f\"{megatron_lm_plugin.is_train_batch_min} will decide the `train_batch_size` ({micro_batch_size}).\"\\n+ )\\n+ else:\\n+ for obj in args:\\n+ if isinstance(obj, MegatronLMDummyDataLoader):\\n+ micro_batch_size = obj.dataset_args[\"micro_batch_size\"]\\n+ break\\n+\\n+ dp_degree = self.num_processes // (megatron_lm_plugin.tp_degree * megatron_lm_plugin.pp_degree)\\n+ megatron_lm_plugin.set_training_args(micro_batch_size, dp_degree)\\n+\\n+ model = None\\n+ optimizer = None\\n+ scheduler = None\\n+ is_dummy_scheduler = False\\n+ batch_data = None\\n+ for obj in args:\\n+ if isinstance(obj, torch.utils.data.DataLoader) and batch_data is None:\\n+ batch_data = next(iter(obj))\\n+ if isinstance(obj, torch.nn.Module):\\n+ model = obj\\n+ elif isinstance(obj, (torch.optim.Optimizer)):\\n+ optimizer = obj\\n+ elif isinstance(obj, (torch.optim.lr_scheduler._LRScheduler, MegatronLMDummyScheduler)):\\n+ scheduler = obj\\n+\\n+ if model is not None:\\n+ megatron_lm_plugin.set_network_size_args(model, batch_data)\\n+ if optimizer is not None:\\n+ megatron_lm_plugin.set_optimizer_type(optimizer)\\n+ if scheduler is not None:\\n+ is_dummy_scheduler = isinstance(scheduler, MegatronLMDummyScheduler)\\n+ if not is_dummy_scheduler:\\n+ raise ValueError(\\n+ \"You can\\'t use a custom scheduler with Megatron-LM. 
Please use the `accelerate.utils.MegatronLMDummyScheduler` instead.\"\\n+ )\\n+ megatron_lm_plugin.set_scheduler_args(scheduler)\\n+\\n+ # initialize megatron-lm\\n+ megatron_lm_initialize(self, args_defaults=megatron_lm_plugin.megatron_lm_default_args)\\n+ counter = 0\\n+ result = []\\n+ for obj in args:\\n+ if isinstance(obj, torch.utils.data.DataLoader):\\n+ result.append(megatron_lm_prepare_data_loader(self, obj))\\n+ counter += 1\\n+ elif isinstance(obj, MegatronLMDummyDataLoader):\\n+ if counter == 0:\\n+ obj.set_megatron_data_args()\\n+ dataloaders = megatron_lm_prepare_data_loader(self, obj)\\n+ result.append(dataloaders[counter])\\n+ counter += 1\\n+ else:\\n+ result.append(obj)\\n+\\n+ if model is not None:\\n+ model = megatron_lm_prepare_model(self)\\n+ if optimizer is not None:\\n+ optimizer = megatron_lm_prepare_optimizer(self, model)\\n+ if scheduler is not None:\\n+ scheduler = megatron_lm_prepare_scheduler(self, optimizer, scheduler)\\n+\\n+ if model is not None:\\n+ model = MegatronEngine(self, model, optimizer, scheduler)\\n+ if optimizer is not None:\\n+ optimizer = MegatronLMOptimizerWrapper(optimizer)\\n+ if scheduler is not None:\\n+ scheduler = MegatronLMSchedulerWrapper(scheduler, optimizer)\\n+\\n+ for i in range(len(result)):\\n+ if isinstance(result[i], torch.nn.Module):\\n+ result[i] = model\\n+ elif isinstance(result[i], torch.optim.Optimizer):\\n+ result[i] = optimizer\\n+ elif isinstance(result[i], MegatronLMDummyScheduler):\\n+ result[i] = scheduler\\n+ if model is not None:\\n+ self._models.append(model)\\n+ if optimizer is not None:\\n+ self._optimizers.append(optimizer)\\n+ if scheduler is not None:\\n+ self._schedulers.append(scheduler)\\n+ if len(self._models) > 1:\\n+ raise AssertionError(\\n+ \"You can\\'t use same `Accelerator()` instance with multiple models when using Megatron-LM\"\\n+ )\\n+ return tuple(result)\\n+\\n def prepare_data_loader(self, data_loader: torch.utils.data.DataLoader, device_placement=None):\\n \"\"\"\\n Prepares a PyTorch DataLoader for training in any distributed setup. 
It is recommended to use\\n@@ -1001,6 +1163,8 @@ def backward(self, loss, **kwargs):\\n loss = loss / self.gradient_accumulation_steps\\n if self.distributed_type == DistributedType.DEEPSPEED:\\n self.deepspeed_engine_wrapped.backward(loss, **kwargs)\\n+ elif self.distributed_type == DistributedType.MEGATRON_LM:\\n+ return\\n elif self.scaler is not None:\\n self.scaler.scale(loss).backward(**kwargs)\\n else:\\n@@ -1312,6 +1476,10 @@ def save_state(self, output_dir: str):\\n ckpt_id = f\"{MODEL_NAME}\" if i == 0 else f\"{MODEL_NAME}_{i}\"\\n model.save_checkpoint(output_dir, ckpt_id)\\n logger.info(f\"DeepSpeed Model and Optimizer saved to output dir {os.path.join(output_dir, ckpt_id)}\")\\n+ elif self.distributed_type == DistributedType.MEGATRON_LM:\\n+ logger.info(\"Saving Megatron-LM Model, Optimizer and Scheduler\")\\n+ model.save_checkpoint(output_dir)\\n+ logger.info(f\"Megatron-LM Model , Optimizer and Scheduler saved to output dir {output_dir}\")\\n else:\\n weights.append(self.get_state_dict(model, unwrap=False))\\n \\n@@ -1322,7 +1490,7 @@ def save_state(self, output_dir: str):\\n logger.info(\"Saving FSDP Optimizer\")\\n self.state.fsdp_plugin.save_optimizer(self, opt, self._models[i], output_dir, i)\\n logger.info(f\"FSDP Optimizer saved to output dir {output_dir}\")\\n- elif self.distributed_type != DistributedType.DEEPSPEED:\\n+ elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:\\n optimizers = self._optimizers\\n \\n # Save the lr schedulers taking care of DeepSpeed nuances\\n@@ -1332,7 +1500,7 @@ def save_state(self, output_dir: str):\\n if isinstance(scheduler, DeepSpeedSchedulerWrapper):\\n continue\\n schedulers.append(scheduler)\\n- else:\\n+ elif self.distributed_type not in [DistributedType.MEGATRON_LM]:\\n schedulers = self._schedulers\\n \\n save_location = save_accelerator_state(\\n@@ -1374,6 +1542,10 @@ def load_state(self, input_dir: str):\\n ckpt_id = f\"{MODEL_NAME}\" if i == 0 else f\"{MODEL_NAME}_{i}\"\\n model.load_checkpoint(input_dir, ckpt_id)\\n logger.info(f\"DeepSpeed Model and Optimizer loaded from input dir {os.path.join(input_dir, ckpt_id)}\")\\n+ elif self.distributed_type == DistributedType.MEGATRON_LM:\\n+ logger.info(\"Loading Megatron-LM Model, Optimizer and Scheduler\")\\n+ model.load_checkpoint(input_dir)\\n+ logger.info(f\"Megatron-LM Model , Optimizer and Scheduler loaded from input dir {input_dir}\")\\n else:\\n models.append(model)\\n \\n@@ -1384,7 +1556,7 @@ def load_state(self, input_dir: str):\\n logger.info(\"Loading FSDP Optimizer\")\\n self.state.fsdp_plugin.load_optimizer(self, opt, self._models[i], input_dir, i)\\n logger.info(f\"FSDP Optimizer loaded from input dir {input_dir}\")\\n- elif self.distributed_type != DistributedType.DEEPSPEED:\\n+ elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:\\n optimizers = self._optimizers\\n \\n # Load the lr schedulers taking care of DeepSpeed nuances\\n@@ -1394,7 +1566,7 @@ def load_state(self, input_dir: str):\\n if isinstance(scheduler, DeepSpeedSchedulerWrapper):\\n continue\\n schedulers.append(scheduler)\\n- else:\\n+ elif self.distributed_type not in [DistributedType.MEGATRON_LM]:\\n schedulers = self._schedulers\\n \\n load_accelerator_state(input_dir, models, optimizers, schedulers, self.state.process_index, self.scaler)\\ndiff --git a/src/accelerate/checkpointing.py b/src/accelerate/checkpointing.py\\nindex d5e816aab..00dfce73e 100644\\n--- a/src/accelerate/checkpointing.py\\n+++ 
b/src/accelerate/checkpointing.py\\n@@ -155,15 +155,18 @@ def load_accelerator_state(input_dir, models, optimizers, schedulers, process_in\\n logger.info(\"GradScaler state loaded successfully\")\\n \\n # Random states\\n- states = torch.load(os.path.join(input_dir, f\"{RNG_STATE_NAME}_{process_index}.pkl\"))\\n- random.setstate(states[\"random_state\"])\\n- np.random.set_state(states[\"numpy_random_seed\"])\\n- torch.set_rng_state(states[\"torch_manual_seed\"])\\n- torch.cuda.set_rng_state_all(states[\"torch_cuda_manual_seed\"])\\n- # ^^ safe to call this function even if cuda is not available\\n- if is_tpu_available():\\n- xm.set_rng_state(states[\"xm_seed\"])\\n- logger.info(\"All random states loaded successfully\")\\n+ try:\\n+ states = torch.load(os.path.join(input_dir, f\"{RNG_STATE_NAME}_{process_index}.pkl\"))\\n+ random.setstate(states[\"random_state\"])\\n+ np.random.set_state(states[\"numpy_random_seed\"])\\n+ torch.set_rng_state(states[\"torch_manual_seed\"])\\n+ torch.cuda.set_rng_state_all(states[\"torch_cuda_manual_seed\"])\\n+ # ^^ safe to call this function even if cuda is not available\\n+ if is_tpu_available():\\n+ xm.set_rng_state(states[\"xm_seed\"])\\n+ logger.info(\"All random states loaded successfully\")\\n+ except:\\n+ logger.info(\"Could not load random states\")\\n \\n \\n def save_custom_state(obj, path, index: int = 0):\\ndiff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\\nindex e86a0abff..0fa3ceab2 100644\\n--- a/src/accelerate/commands/config/cluster.py\\n+++ b/src/accelerate/commands/config/cluster.py\\n@@ -275,6 +275,67 @@ def get_cluster_input():\\n default=\"FULL_STATE_DICT\",\\n )\\n \\n+ megatron_lm_config = {}\\n+ if distributed_type in [DistributedType.MULTI_GPU]:\\n+ use_megatron_lm = _ask_field(\\n+ \"Do you want to use Megatron-LM ? [yes/NO]: \",\\n+ _convert_yes_no_to_bool,\\n+ default=False,\\n+ error_message=\"Please enter yes or no.\",\\n+ )\\n+ if use_megatron_lm:\\n+ distributed_type = DistributedType.MEGATRON_LM\\n+ if distributed_type == DistributedType.MEGATRON_LM:\\n+ prefix = \"megatron_lm_\"\\n+ megatron_lm_config[prefix + \"tp_degree\"] = _ask_field(\\n+ \"What is the Tensor Parallelism degree/size? [1]:\",\\n+ lambda x: int(x),\\n+ default=1,\\n+ error_message=\"Please enter an integer.\",\\n+ )\\n+ if megatron_lm_config[prefix + \"tp_degree\"] > 1:\\n+ megatron_lm_config[prefix + \"sequence_parallelism\"] = _ask_field(\\n+ \"Do you want to enable Sequence Parallelism? [YES/no]: \",\\n+ _convert_yes_no_to_bool,\\n+ default=True,\\n+ error_message=\"Please enter yes or no.\",\\n+ )\\n+\\n+ megatron_lm_config[prefix + \"pp_degree\"] = _ask_field(\\n+ \"What is the Pipeline Parallelism degree/size? [1]:\",\\n+ lambda x: int(x),\\n+ default=1,\\n+ error_message=\"Please enter an integer.\",\\n+ )\\n+ if megatron_lm_config[prefix + \"pp_degree\"] > 1:\\n+ megatron_lm_config[prefix + \"num_micro_batches\"] = _ask_field(\\n+ \"What is the number of micro-batches? [1]:\",\\n+ lambda x: int(x),\\n+ default=1,\\n+ error_message=\"Please enter an integer.\",\\n+ )\\n+\\n+ megatron_lm_config[prefix + \"recompute_activations\"] = _ask_field(\\n+ \"Do you want to enable selective activation recomputation? 
[YES/no]: \",\\n+ _convert_yes_no_to_bool,\\n+ default=True,\\n+ error_message=\"Please enter yes or no.\",\\n+ )\\n+\\n+ megatron_lm_config[prefix + \"use_distributed_optimizer\"] = _ask_field(\\n+ \"Do you want to use distributed optimizer \"\\n+ \"which shards optimizer state and gradients across data pralellel ranks? [YES/no]: \",\\n+ _convert_yes_no_to_bool,\\n+ default=True,\\n+ error_message=\"Please enter yes or no.\",\\n+ )\\n+\\n+ megatron_lm_config[prefix + \"gradient_clipping\"] = _ask_field(\\n+ \"What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]: \",\\n+ lambda x: float(x),\\n+ default=1.0,\\n+ )\\n+\\n if distributed_type == DistributedType.TPU:\\n main_training_function = _ask_field(\\n \"What is the name of the function in your script that should be launched in all parallel scripts? [main]: \",\\n@@ -295,7 +356,7 @@ def get_cluster_input():\\n default=1,\\n error_message=\"Please enter an integer.\",\\n )\\n- elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED]:\\n+ elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:\\n num_processes = _ask_field(\\n \"How many GPU(s) should be used for distributed training? [1]:\",\\n lambda x: int(x),\\n@@ -343,6 +404,7 @@ def get_cluster_input():\\n main_training_function=main_training_function,\\n deepspeed_config=deepspeed_config,\\n fsdp_config=fsdp_config,\\n+ megatron_lm_config=megatron_lm_config,\\n use_cpu=use_cpu,\\n rdzv_backend=rdzv_backend,\\n same_network=same_network,\\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\\nindex 43faf45ca..159f951dd 100644\\n--- a/src/accelerate/commands/config/config_args.py\\n+++ b/src/accelerate/commands/config/config_args.py\\n@@ -146,6 +146,8 @@ class ClusterConfig(BaseConfig):\\n deepspeed_config: dict = None\\n # args for fsdp\\n fsdp_config: dict = None\\n+ # args for megatron_lm\\n+ megatron_lm_config: dict = None\\n # args for TPU\\n downcast_bf16: bool = False\\n \\n@@ -154,6 +156,8 @@ def __post_init__(self):\\n self.deepspeed_config = {}\\n if self.fsdp_config is None:\\n self.fsdp_config = {}\\n+ if self.megatron_lm_config is None:\\n+ self.megatron_lm_config = {}\\n return super().__post_init__()\\n \\n \\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex f348bb0a6..c297223cb 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -83,6 +83,8 @@ def launch_command_parser(subparsers=None):\\n action=\"store_true\",\\n help=\"Whether or not this should use MPS-enabled GPU device on MacOS machines.\",\\n )\\n+\\n+ # deepspeed args\\n parser.add_argument(\\n \"--use_deepspeed\",\\n default=False,\\n@@ -163,6 +165,8 @@ def launch_command_parser(subparsers=None):\\n type=str,\\n help=\"DeepSpeed multi-node launcher to use.\",\\n )\\n+\\n+ # fsdp args\\n parser.add_argument(\\n \"--use_fsdp\",\\n default=False,\\n@@ -236,6 +240,62 @@ def launch_command_parser(subparsers=None):\\n type=str,\\n help=\"This argument is deprecated. Use `fsdp_transformer_layer_cls_to_wrap` instead.\",\\n )\\n+\\n+ # megatron_lm args\\n+ parser.add_argument(\\n+ \"--use_megatron_lm\",\\n+ default=False,\\n+ action=\"store_true\",\\n+ help=\"Whether to use Megatron-LM.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--megatron_lm_tp_degree\",\\n+ type=int,\\n+ default=1,\\n+ help=\"Megatron-LM\\'s Tensor Parallelism (TP) degree. 
(useful only when `use_megatron_lm` flag is passed).\",\\n+ )\\n+ parser.add_argument(\\n+ \"--megatron_lm_pp_degree\",\\n+ type=int,\\n+ default=1,\\n+ help=\"Megatron-LM\\'s Pipeline Parallelism (PP) degree. (useful only when `use_megatron_lm` flag is passed).\",\\n+ )\\n+ parser.add_argument(\\n+ \"--megatron_lm_num_micro_batches\",\\n+ type=int,\\n+ default=None,\\n+ help=\"Megatron-LM\\'s number of micro batches when PP degree > 1. (useful only when `use_megatron_lm` flag is passed).\",\\n+ )\\n+ parser.add_argument(\\n+ \"--megatron_lm_sequence_parallelism\",\\n+ default=None,\\n+ type=str,\\n+ help=\"Decides Whether (true|false) to enable Sequence Parallelism when TP degree > 1. \"\\n+ \"(useful only when `use_megatron_lm` flag is passed).\",\\n+ )\\n+ parser.add_argument(\\n+ \"--megatron_lm_recompute_activations\",\\n+ default=None,\\n+ type=str,\\n+ help=\"Decides Whether (true|false) to enable Selective Activation Recomputation. \"\\n+ \"(useful only when `use_megatron_lm` flag is passed).\",\\n+ )\\n+ parser.add_argument(\\n+ \"--megatron_lm_use_distributed_optimizer\",\\n+ default=None,\\n+ type=str,\\n+ help=\"Decides Whether (true|false) to use distributed optimizer \"\\n+ \"which shards optimizer state and gradients across Data Pralellel (DP) ranks. \"\\n+ \"(useful only when `use_megatron_lm` flag is passed).\",\\n+ )\\n+ parser.add_argument(\\n+ \"--megatron_lm_gradient_clipping\",\\n+ default=1.0,\\n+ type=float,\\n+ help=\"Megatron-LM\\'s gradient clipping value based on global L2 Norm (0 to disable). \"\\n+ \"(useful only when `use_megatron_lm` flag is passed).\",\\n+ )\\n+\\n parser.add_argument(\\n \"--tpu\", default=False, action=\"store_true\", help=\"Whether or not this should launch a TPU training.\"\\n )\\n@@ -487,6 +547,22 @@ def multi_gpu_launcher(args):\\n current_env[\"FSDP_BACKWARD_PREFETCH\"] = str(args.fsdp_backward_prefetch_policy)\\n if args.fsdp_state_dict_type is not None:\\n current_env[\"FSDP_STATE_DICT_TYPE\"] = str(args.fsdp_state_dict_type)\\n+\\n+ if args.use_megatron_lm:\\n+ prefix = \"MEGATRON_LM_\"\\n+ current_env[\"USE_MEGATRON_LM\"] = \"true\"\\n+ current_env[prefix + \"TP_DEGREE\"] = str(args.megatron_lm_tp_degree)\\n+ current_env[prefix + \"PP_DEGREE\"] = str(args.megatron_lm_pp_degree)\\n+ current_env[prefix + \"GRADIENT_CLIPPING\"] = str(args.megatron_lm_gradient_clipping)\\n+ if args.megatron_lm_num_micro_batches is not None:\\n+ current_env[prefix + \"NUM_MICRO_BATCHES\"] = str(args.megatron_lm_num_micro_batches)\\n+ if args.megatron_lm_sequence_parallelism is not None:\\n+ current_env[prefix + \"SEQUENCE_PARALLELISM\"] = str(args.megatron_lm_sequence_parallelism)\\n+ if args.megatron_lm_recompute_activations is not None:\\n+ current_env[prefix + \"RECOMPUTE_ACTIVATIONS\"] = str(args.megatron_lm_recompute_activations)\\n+ if args.megatron_lm_use_distributed_optimizer is not None:\\n+ current_env[prefix + \"USE_DISTRIBUTED_OPTIMIZER\"] = str(args.megatron_lm_use_distributed_optimizer)\\n+\\n current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\\n if is_torch_version(\"<\", \"1.9.0\"):\\n raise NotImplementedError(\"Multi-node training requires pytorch>=1.9.0\")\\n@@ -825,12 +901,14 @@ def launch_command(args):\\n and not args.use_deepspeed\\n and not args.use_fsdp\\n and not args.use_mps_device\\n+ and not args.use_megatron_lm\\n ):\\n args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED\\n args.multi_gpu = defaults.distributed_type == DistributedType.MULTI_GPU\\n args.tpu = 
defaults.distributed_type == DistributedType.TPU\\n args.use_fsdp = defaults.distributed_type == DistributedType.FSDP\\n args.use_mps_device = defaults.distributed_type == DistributedType.MPS\\n+ args.use_megatron_lm = defaults.distributed_type == DistributedType.MEGATRON_LM\\n if not args.use_mps_device:\\n if args.gpu_ids is None:\\n if defaults.gpu_ids is not None:\\n@@ -851,6 +929,8 @@ def launch_command(args):\\n if \"fsdp\" not in arg_to_set:\\n arg_to_set = \"fsdp_\" + arg_to_set\\n setattr(args, arg_to_set, defaults.fsdp_config[k])\\n+ for k in defaults.megatron_lm_config:\\n+ setattr(args, k, defaults.megatron_lm_config[k])\\n continue\\n \\n # Those args are handled separately\\n@@ -901,6 +981,8 @@ def launch_command(args):\\n deepspeed_launcher(args)\\n elif args.use_fsdp and not args.cpu:\\n multi_gpu_launcher(args)\\n+ elif args.use_megatron_lm and not args.cpu:\\n+ multi_gpu_launcher(args)\\n elif args.multi_gpu and not args.cpu:\\n multi_gpu_launcher(args)\\n elif args.tpu and not args.cpu:\\ndiff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\\nindex fac1b50e2..c4a015156 100644\\n--- a/src/accelerate/data_loader.py\\n+++ b/src/accelerate/data_loader.py\\n@@ -625,7 +625,7 @@ def prepare_data_loader(\\n sampler_is_batch_sampler = False\\n generator = getattr(dataloader, \"generator\", None)\\n # No change if no multiprocess\\n- if num_processes != 1 and not dispatch_batches:\\n+ if (num_processes != 1 or state.distributed_type == DistributedType.MEGATRON_LM) and not dispatch_batches:\\n if isinstance(new_dataset, IterableDataset):\\n if getattr(dataloader.dataset, \"generator\", None) is not None:\\n generator = dataloader.dataset.generator\\ndiff --git a/src/accelerate/logging.py b/src/accelerate/logging.py\\nindex 2c67e24eb..dca093215 100644\\n--- a/src/accelerate/logging.py\\n+++ b/src/accelerate/logging.py\\n@@ -15,6 +15,7 @@\\n import logging\\n \\n from .state import AcceleratorState\\n+from .utils import DistributedType\\n \\n \\n class MultiProcessAdapter(logging.LoggerAdapter):\\n@@ -28,7 +29,12 @@ class MultiProcessAdapter(logging.LoggerAdapter):\\n @staticmethod\\n def _should_log(main_process_only):\\n \"Check if log should be performed\"\\n- return not main_process_only or (main_process_only and AcceleratorState().local_process_index == 0)\\n+ state = AcceleratorState()\\n+ if state.distributed_type != DistributedType.MEGATRON_LM:\\n+ process_index_flag = state.local_process_index == 0\\n+ else:\\n+ process_index_flag = state.process_index == state.num_processes - 1\\n+ return not main_process_only or (main_process_only and process_index_flag)\\n \\n def log(self, level, msg, *args, **kwargs):\\n \"\"\"\\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\\nindex a700cfad6..82ea288f0 100644\\n--- a/src/accelerate/state.py\\n+++ b/src/accelerate/state.py\\n@@ -70,6 +70,7 @@ def __init__(\\n cpu: bool = False,\\n deepspeed_plugin=None,\\n fsdp_plugin=None,\\n+ megatron_lm_plugin=None,\\n _from_accelerator: bool = False,\\n **kwargs,\\n ):\\n@@ -162,6 +163,10 @@ def __init__(\\n if self.mixed_precision != \"no\":\\n fsdp_plugin.set_mixed_precision(self.mixed_precision)\\n self.fsdp_plugin = fsdp_plugin\\n+ if os.environ.get(\"USE_MEGATRON_LM\", \"false\") == \"true\":\\n+ self.distributed_type = DistributedType.MEGATRON_LM\\n+ megatron_lm_plugin.set_mixed_precision(self.mixed_precision)\\n+ self.megatron_lm_plugin = megatron_lm_plugin\\n elif get_int_from_env([\"PMI_SIZE\", \"OMPI_COMM_WORLD_SIZE\", \"MV2_COMM_WORLD_SIZE\", 
\"WORLD_SIZE\"], 1) > 1:\\n self.distributed_type = DistributedType.MULTI_CPU\\n if is_ccl_available() and get_int_from_env([\"CCL_WORKER_COUNT\"], 0) > 0:\\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\\nindex 4bc71ba83..b472ec556 100644\\n--- a/src/accelerate/utils/__init__.py\\n+++ b/src/accelerate/utils/__init__.py\\n@@ -13,6 +13,7 @@\\n InitProcessGroupKwargs,\\n KwargsHandler,\\n LoggerType,\\n+ MegatronLMPlugin,\\n PrecisionType,\\n RNGType,\\n SageMakerDistributedType,\\n@@ -28,6 +29,7 @@\\n is_comet_ml_available,\\n is_datasets_available,\\n is_deepspeed_available,\\n+ is_megatron_lm_available,\\n is_rich_available,\\n is_sagemaker_available,\\n is_tensorboard_available,\\n@@ -94,6 +96,23 @@\\n )\\n \\n from .launch import PrepareForLaunch, _filter_args, get_launch_prefix\\n+from .megatron_lm import (\\n+ AbstractTrainStep,\\n+ BertTrainStep,\\n+ GPTTrainStep,\\n+ MegatronEngine,\\n+ MegatronLMDummyDataLoader,\\n+ MegatronLMDummyScheduler,\\n+ MegatronLMOptimizerWrapper,\\n+ MegatronLMSchedulerWrapper,\\n+ T5TrainStep,\\n+ avg_losses_across_data_parallel_group,\\n+)\\n+from .megatron_lm import initialize as megatron_lm_initialize\\n+from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader\\n+from .megatron_lm import prepare_model as megatron_lm_prepare_model\\n+from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer\\n+from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler\\n from .memory import find_executable_batch_size\\n from .other import (\\n extract_model_from_parallel,\\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\\nindex 9f6e30bec..8227fa5bc 100644\\n--- a/src/accelerate/utils/dataclasses.py\\n+++ b/src/accelerate/utils/dataclasses.py\\n@@ -16,6 +16,7 @@\\n General namespace and dataclass related classes\\n \"\"\"\\n \\n+import argparse\\n import copy\\n import enum\\n import functools\\n@@ -24,7 +25,8 @@\\n import warnings\\n from dataclasses import dataclass, field\\n from datetime import timedelta\\n-from typing import Any, Callable, Iterable, Optional\\n+from distutils.util import strtobool\\n+from typing import Any, Callable, Dict, Iterable, List, Optional\\n \\n import torch\\n \\n@@ -127,6 +129,7 @@ class DistributedType(str, enum.Enum):\\n FSDP = \"FSDP\"\\n TPU = \"TPU\"\\n MPS = \"MPS\"\\n+ MEGATRON_LM = \"MEGATRON_LM\"\\n \\n \\n class SageMakerDistributedType(str, enum.Enum):\\n@@ -644,3 +647,351 @@ def load_optimizer(self, accelerator, optimizer, model, input_dir, optimizer_ind\\n # called from all ranks, though only rank0 has a valid param for full_osd\\n sharded_osd = FSDP.scatter_full_optim_state_dict(full_osd, model)\\n optimizer.load_state_dict(sharded_osd)\\n+\\n+\\n+@dataclass\\n+class MegatronLMPlugin:\\n+ \"\"\"\\n+ Plugin for Megatron-LM to enable tensor, pipeline, sequence and data parallelism. 
Also to enable selective\\n+ activation recomputation and optimized fused kernels.\\n+ \"\"\"\\n+\\n+ tp_degree: int = field(default=None, metadata={\"help\": \"tensor parallelism degree.\"})\\n+ pp_degree: int = field(default=None, metadata={\"help\": \"pipeline parallelism degree.\"})\\n+ num_micro_batches: int = field(default=None, metadata={\"help\": \"number of micro-batches.\"})\\n+ gradient_clipping: float = field(\\n+ default=None, metadata={\"help\": \"gradient clipping value based on global L2 Norm (0 to disable)\"}\\n+ )\\n+ sequence_parallelism: bool = field(\\n+ default=None,\\n+ metadata={\"help\": \"enable sequence parallelism\"},\\n+ )\\n+ recompute_activation: bool = field(\\n+ default=None,\\n+ metadata={\"help\": \"enable selective activation recomputation\"},\\n+ )\\n+ use_distributed_optimizer: bool = field(\\n+ default=None,\\n+ metadata={\"help\": \"enable distributed optimizer\"},\\n+ )\\n+ pipeline_model_parallel_split_rank: int = field(\\n+ default=None, metadata={\"help\": \"Rank where encoder and decoder should be split.\"}\\n+ )\\n+ num_layers_per_virtual_pipeline_stage: int = field(\\n+ default=None, metadata={\"help\": \"Number of layers per virtual pipeline stage.\"}\\n+ )\\n+ is_train_batch_min: str = field(\\n+ default=True,\\n+ metadata={\"help\": \"If both train & eval dataloaders are specified, this will decide the micro_batch_size\"},\\n+ )\\n+ train_iters: int = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"Total number of iterations to train over all training runs. \"\\n+ \"Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`\"\\n+ },\\n+ )\\n+ train_samples: int = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"Total number of samples to train over all training runs. \"\\n+ \"Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`\"\\n+ },\\n+ )\\n+ weight_decay_incr_style: str = field(\\n+ default=\"constant\",\\n+ metadata={\"help\": \\'Weight decay increment function. choices=[\"constant\", \"linear\", \"cosine\"]. \\'},\\n+ )\\n+ start_weight_decay: float = field(\\n+ default=None,\\n+ metadata={\"help\": \"Initial weight decay coefficient for L2 regularization.\"},\\n+ )\\n+ end_weight_decay: float = field(\\n+ default=None,\\n+ metadata={\"help\": \"End of run weight decay coefficient for L2 regularization.\"},\\n+ )\\n+ lr_decay_style: str = field(\\n+ default=\"linear\",\\n+ metadata={\"help\": \"Learning rate decay function. choices=[\\'constant\\', \\'linear\\', \\'cosine\\'].\"},\\n+ )\\n+ lr_decay_iters: int = field(\\n+ default=None,\\n+ metadata={\"help\": \"Number of iterations for learning rate decay. If None defaults to `train_iters`.\"},\\n+ )\\n+ lr_decay_samples: int = field(\\n+ default=None,\\n+ metadata={\"help\": \"Number of samples for learning rate decay. If None defaults to `train_samples`.\"},\\n+ )\\n+ lr_warmup_iters: int = field(\\n+ default=None,\\n+ metadata={\"help\": \"number of iterations to linearly warmup learning rate over.\"},\\n+ )\\n+ lr_warmup_samples: int = field(\\n+ default=None,\\n+ metadata={\"help\": \"number of samples to linearly warmup learning rate over.\"},\\n+ )\\n+ lr_warmup_fraction: float = field(\\n+ default=None,\\n+ metadata={\"help\": \"fraction of lr-warmup-(iters/samples) to linearly warmup learning rate over.\"},\\n+ )\\n+ min_lr: float = field(\\n+ default=0,\\n+ metadata={\"help\": \"Minumum value for learning rate. 
The scheduler clip values below this threshold.\"},\\n+ )\\n+ consumed_samples: List[int] = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"Number of samples consumed in the same order as the dataloaders to `accelerator.prepare` call.\"\\n+ },\\n+ )\\n+ no_wd_decay_cond: Optional[Callable] = field(default=None, metadata={\"help\": \"Condition to disable weight decay.\"})\\n+ scale_lr_cond: Optional[Callable] = field(default=None, metadata={\"help\": \"Condition to scale learning rate.\"})\\n+ lr_mult: float = field(default=1.0, metadata={\"help\": \"Learning rate multiplier.\"})\\n+ megatron_dataset_flag: bool = field(\\n+ default=False,\\n+ metadata={\"help\": \"Whether the format of dataset follows Megatron-LM Indexed/Cached/MemoryMapped format.\"},\\n+ )\\n+ seq_length: int = field(\\n+ default=None,\\n+ metadata={\"help\": \"Maximum sequence length to process.\"},\\n+ )\\n+ encoder_seq_length: int = field(\\n+ default=None,\\n+ metadata={\"help\": \"Maximum sequence length to process for the encoder.\"},\\n+ )\\n+ decoder_seq_length: int = field(\\n+ default=None,\\n+ metadata={\"help\": \"Maximum sequence length to process for the decoder.\"},\\n+ )\\n+ tensorboard_dir: str = field(\\n+ default=None,\\n+ metadata={\"help\": \"Path to save tensorboard logs.\"},\\n+ )\\n+ set_all_logging_options: bool = field(\\n+ default=False,\\n+ metadata={\"help\": \"Whether to set all logging options.\"},\\n+ )\\n+ custom_train_step_class: Optional[Any] = field(\\n+ default=None,\\n+ metadata={\"help\": \"Custom train step class.\"},\\n+ )\\n+ custom_train_step_kwargs: Optional[Dict[str, Any]] = field(\\n+ default=None,\\n+ metadata={\"help\": \"Custom train step kwargs.\"},\\n+ )\\n+ eval_iters: int = field(\\n+ default=100, metadata={\"help\": \"Number of iterations to run for evaluation validation/test for.\"}\\n+ )\\n+ eval_interval: int = field(\\n+ default=1000, metadata={\"help\": \"Interval between running evaluation on validation set.\"}\\n+ )\\n+\\n+ def __post_init__(self):\\n+ prefix = \"MEGATRON_LM_\"\\n+ if self.tp_degree is None:\\n+ self.tp_degree = int(os.environ.get(prefix + \"TP_DEGREE\", 1))\\n+ if self.pp_degree is None:\\n+ self.pp_degree = int(os.environ.get(prefix + \"PP_DEGREE\", 1))\\n+ if self.num_micro_batches is None:\\n+ self.num_micro_batches = int(os.environ.get(prefix + \"NUM_MICRO_BATCHES\", 1))\\n+ if self.gradient_clipping is None:\\n+ self.gradient_clipping = float(os.environ.get(prefix + \"GRADIENT_CLIPPING\", 1.0))\\n+ if self.recompute_activation is None:\\n+ self.recompute_activation = strtobool(os.environ.get(prefix + \"RECOMPUTE_ACTIVATION\", \"False\")) == 1\\n+ if self.use_distributed_optimizer is None:\\n+ self.use_distributed_optimizer = (\\n+ strtobool(os.environ.get(prefix + \"USE_DISTRIBUTED_OPTIMIZER\", \"False\")) == 1\\n+ )\\n+ if self.sequence_parallelism is None:\\n+ self.sequence_parallelism = strtobool(os.environ.get(prefix + \"SEQUENCE_PARALLELISM\", \"False\")) == 1\\n+\\n+ if self.pp_degree > 1 or self.use_distributed_optimizer:\\n+ self.DDP_impl = \"local\"\\n+ else:\\n+ self.DDP_impl = \"torch\"\\n+\\n+ if self.consumed_samples is not None:\\n+ if len(self.consumed_samples) == 1:\\n+ self.consumed_samples.extend([0, 0])\\n+ elif len(self.consumed_samples) == 2:\\n+ self.consumed_samples.append(0)\\n+\\n+ self.megatron_lm_default_args = {\\n+ \"tensor_model_parallel_size\": self.tp_degree,\\n+ \"pipeline_model_parallel_size\": self.pp_degree,\\n+ \"pipeline_model_parallel_split_rank\": self.pipeline_model_parallel_split_rank,\\n+ 
\"num_layers_per_virtual_pipeline_stage\": self.num_layers_per_virtual_pipeline_stage,\\n+ \"DDP_impl\": self.DDP_impl,\\n+ \"use_distributed_optimizer\": self.use_distributed_optimizer,\\n+ \"sequence_parallel\": self.sequence_parallelism,\\n+ \"clip_grad\": self.gradient_clipping,\\n+ \"num_micro_batches\": self.num_micro_batches,\\n+ \"consumed_samples\": self.consumed_samples,\\n+ \"no_wd_decay_cond\": self.no_wd_decay_cond,\\n+ \"scale_lr_cond\": self.scale_lr_cond,\\n+ \"lr_mult\": self.lr_mult,\\n+ \"megatron_dataset_flag\": self.megatron_dataset_flag,\\n+ \"eval_iters\": self.eval_iters,\\n+ \"eval_interval\": self.eval_interval,\\n+ }\\n+ if self.recompute_activation:\\n+ self.megatron_lm_default_args[\"recompute_granularity\"] = \"selective\"\\n+ if self.tensorboard_dir is not None:\\n+ self.megatron_lm_default_args[\"tensorboard_dir\"] = self.tensorboard_dir\\n+ if self.set_all_logging_options:\\n+ self.set_tensorboard_logging_options()\\n+\\n+ def set_network_size_args(self, model, batch_data=None):\\n+ # Check if the model is either BERT, GPT or T5 else raise error\\n+ # set \\'num_layers\\', \\'hidden_size\\', \\'num_attention_heads\\', \\'max_position_embeddings\\'\\n+ if \"megatron-bert\" in model.config.model_type.lower():\\n+ model_type_name = \"bert\"\\n+ num_layers = model.config.num_hidden_layers\\n+ hidden_size = model.config.hidden_size\\n+ num_attention_heads = model.config.num_attention_heads\\n+ max_position_embeddings = model.config.max_position_embeddings\\n+ num_labels = model.config.num_labels\\n+ orig_vocab_size = model.config.vocab_size\\n+ if \"maskedlm\" in model.__class__.__name__.lower():\\n+ pretraining_flag = True\\n+ if self.seq_length is not None:\\n+ if self.encoder_seq_length is not None:\\n+ warnings.warn(\"Both `seq_length` and `encoder_seq_length` are set. Using `encoder_seq_length`.\")\\n+ self.seq_length = self.encoder_seq_length\\n+ elif self.encoder_seq_length is not None:\\n+ self.seq_length = self.encoder_seq_length\\n+ elif batch_data is not None:\\n+ self.seq_length = batch_data[\"input_ids\"].shape[1]\\n+ else:\\n+ self.seq_length = max_position_embeddings\\n+ self.megatron_lm_default_args[\"seq_length\"] = self.seq_length\\n+ elif \"gpt2\" in model.config.model_type.lower():\\n+ model_type_name = \"gpt\"\\n+ num_layers = model.config.n_layer\\n+ hidden_size = model.config.n_embd\\n+ num_attention_heads = model.config.n_head\\n+ max_position_embeddings = model.config.n_positions\\n+ orig_vocab_size = model.config.vocab_size\\n+ pretraining_flag = True\\n+ if self.seq_length is not None:\\n+ if self.decoder_seq_length is not None:\\n+ warnings.warn(\"Both `seq_length` and `decoder_seq_length` are set. 
Using `decoder_seq_length`.\")\\n+ self.seq_length = self.decoder_seq_length\\n+ elif self.decoder_seq_length is not None:\\n+ self.seq_length = self.decoder_seq_length\\n+ elif batch_data is not None:\\n+ self.seq_length = batch_data[\"input_ids\"].shape[1]\\n+ else:\\n+ self.seq_length = max_position_embeddings\\n+ self.megatron_lm_default_args[\"seq_length\"] = self.seq_length\\n+ elif \"t5\" in model.config.model_type.lower():\\n+ model_type_name = \"t5\"\\n+ num_layers = model.config.num_layers\\n+ hidden_size = model.config.d_model\\n+ num_attention_heads = model.config.num_heads\\n+ max_position_embeddings = model.config.n_positions if hasattr(model.config, \"n_positions\") else 1024\\n+ orig_vocab_size = model.config.vocab_size\\n+ pretraining_flag = True\\n+ if self.encoder_seq_length is None:\\n+ if batch_data is not None:\\n+ self.encoder_seq_length = batch_data[\"input_ids\"].shape[1]\\n+ else:\\n+ self.encoder_seq_length = max_position_embeddings\\n+ if self.decoder_seq_length is None:\\n+ if batch_data is not None:\\n+ self.decoder_seq_length = batch_data[\"labels\"].shape[1]\\n+ else:\\n+ self.decoder_seq_length = max_position_embeddings\\n+\\n+ self.megatron_lm_default_args[\"encoder_seq_length\"] = self.encoder_seq_length\\n+ self.megatron_lm_default_args[\"decoder_seq_length\"] = self.decoder_seq_length\\n+ else:\\n+ raise ValueError(\\n+ \"🤗 Accelerate Megatron-LM integration supports only BERT, GPT and T5 model. \"\\n+ \"Please check the model you are using is one of those.\"\\n+ )\\n+\\n+ self.megatron_lm_default_args[\"model_type_name\"] = model_type_name\\n+ self.megatron_lm_default_args[\"num_layers\"] = num_layers\\n+ self.megatron_lm_default_args[\"hidden_size\"] = hidden_size\\n+ self.megatron_lm_default_args[\"num_attention_heads\"] = num_attention_heads\\n+ self.megatron_lm_default_args[\"max_position_embeddings\"] = max_position_embeddings\\n+ self.megatron_lm_default_args[\"pretraining_flag\"] = pretraining_flag\\n+ self.megatron_lm_default_args[\"orig_vocab_size\"] = orig_vocab_size\\n+ self.megatron_lm_default_args[\"model_return_dict\"] = model.config.return_dict\\n+ if model_type_name == \"bert\":\\n+ self.megatron_lm_default_args[\"num_labels\"] = num_labels\\n+\\n+ def set_mixed_precision(self, mixed_precision):\\n+ if mixed_precision == \"fp16\":\\n+ self.megatron_lm_default_args[\"fp16\"] = True\\n+ elif mixed_precision == \"bf16\":\\n+ self.megatron_lm_default_args[\"bf16\"] = True\\n+ self.DDP_impl = \"local\"\\n+ self.megatron_lm_default_args[\"DDP_impl\"] = self.DDP_impl\\n+\\n+ def set_training_args(self, micro_batch_size, dp_degree):\\n+ self.data_parallel_size = dp_degree\\n+ self.micro_batch_size = micro_batch_size\\n+ self.global_batch_size = dp_degree * micro_batch_size * self.num_micro_batches\\n+ self.megatron_lm_default_args[\"data_parallel_size\"] = self.data_parallel_size\\n+ self.megatron_lm_default_args[\"micro_batch_size\"] = self.micro_batch_size\\n+ self.megatron_lm_default_args[\"global_batch_size\"] = self.global_batch_size\\n+\\n+ def set_optimizer_type(self, optimizer):\\n+ optimizer_name = optimizer.__class__.__name__.lower()\\n+ if \"adam\" in optimizer_name:\\n+ self.megatron_lm_default_args[\"optimizer\"] = \"adam\"\\n+ self.megatron_lm_default_args[\"adam_beta1\"] = optimizer.defaults[\"betas\"][0]\\n+ self.megatron_lm_default_args[\"adam_beta2\"] = optimizer.defaults[\"betas\"][1]\\n+ self.megatron_lm_default_args[\"adam_eps\"] = optimizer.defaults[\"eps\"]\\n+ elif \"sgd\" in optimizer_name:\\n+ 
self.megatron_lm_default_args[\"optimizer\"] = \"sgd\"\\n+ self.megatron_lm_default_args[\"sgd_momentum\"] = optimizer.defaults[\"momentum\"]\\n+ else:\\n+ raise ValueError(f\"Optimizer {optimizer_name} is not supported by Megatron-LM\")\\n+\\n+ self.megatron_lm_default_args[\"lr\"] = optimizer.defaults[\"lr\"]\\n+ self.megatron_lm_default_args[\"weight_decay\"] = optimizer.defaults[\"weight_decay\"]\\n+\\n+ def set_scheduler_args(self, scheduler):\\n+ if self.train_iters is None:\\n+ self.train_iters = scheduler.total_num_steps // self.megatron_lm_default_args[\"data_parallel_size\"]\\n+ if self.train_samples is not None:\\n+ self.train_samples = None\\n+ warnings.warn(\\n+ \"Ignoring `train_samples` as `train_iters` based on scheduler is being used for training.\"\\n+ )\\n+ if self.lr_warmup_iters is None:\\n+ self.lr_warmup_iters = scheduler.warmup_num_steps // self.megatron_lm_default_args[\"data_parallel_size\"]\\n+ if self.lr_warmup_samples is not None:\\n+ warnings.warn(\\n+ \"Ignoring `lr_warmup_samples` as `lr_warmup_iters` based on scheduler is being used for training.\"\\n+ )\\n+ self.lr_warmup_samples = 0\\n+\\n+ self.megatron_lm_default_args[\"train_iters\"] = self.train_iters\\n+ self.megatron_lm_default_args[\"lr_warmup_iters\"] = self.lr_warmup_iters\\n+ self.megatron_lm_default_args[\"train_samples\"] = self.train_samples\\n+ self.megatron_lm_default_args[\"lr_warmup_samples\"] = self.lr_warmup_samples\\n+ self.megatron_lm_default_args[\"lr_decay_iters\"] = self.lr_decay_iters\\n+ self.megatron_lm_default_args[\"lr_decay_samples\"] = self.lr_decay_samples\\n+ self.megatron_lm_default_args[\"lr_warmup_fraction\"] = self.lr_warmup_fraction\\n+ self.megatron_lm_default_args[\"lr_decay_style\"] = self.lr_decay_style\\n+ self.megatron_lm_default_args[\"weight_decay_incr_style\"] = self.weight_decay_incr_style\\n+ self.megatron_lm_default_args[\"start_weight_decay\"] = self.start_weight_decay\\n+ self.megatron_lm_default_args[\"end_weight_decay\"] = self.end_weight_decay\\n+ self.megatron_lm_default_args[\"min_lr\"] = self.min_lr\\n+\\n+ def set_tensorboard_logging_options(self):\\n+ from megatron.arguments import _add_logging_args\\n+\\n+ parser = argparse.ArgumentParser()\\n+ parser = _add_logging_args(parser)\\n+ logging_args = parser.parse_known_args()\\n+ self.dataset_args = vars(logging_args[0])\\n+ for key, value in self.dataset_args.items():\\n+ if key.startswith(\"log_\"):\\n+ self.megatron_lm_default_args[key] = True\\n+ elif key.startswith(\"no_log_\"):\\n+ self.megatron_lm_default_args[key.replace(\"no_\", \"\")] = True\\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\\nindex eb5fda468..58009d03b 100644\\n--- a/src/accelerate/utils/imports.py\\n+++ b/src/accelerate/utils/imports.py\\n@@ -87,6 +87,10 @@ def is_bf16_available(ignore_tpu=False):\\n return False\\n \\n \\n+def is_megatron_lm_available():\\n+ return importlib.util.find_spec(\"megatron\") is not None\\n+\\n+\\n def is_transformers_available():\\n return importlib.util.find_spec(\"transformers\") is not None\\n \\ndiff --git a/src/accelerate/utils/megatron_lm.py b/src/accelerate/utils/megatron_lm.py\\nnew file mode 100644\\nindex 000000000..cb988e360\\n--- /dev/null\\n+++ b/src/accelerate/utils/megatron_lm.py\\n@@ -0,0 +1,1199 @@\\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+import argparse\\n+import math\\n+from abc import ABC\\n+from functools import partial\\n+\\n+import torch\\n+import torch.nn.functional as F\\n+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\\n+\\n+from ..optimizer import AcceleratedOptimizer\\n+from ..scheduler import AcceleratedScheduler\\n+from .imports import is_megatron_lm_available, is_transformers_available\\n+from .operations import send_to_device\\n+\\n+\\n+if is_transformers_available():\\n+ from transformers.modeling_outputs import (\\n+ CausalLMOutputWithCrossAttentions,\\n+ Seq2SeqLMOutput,\\n+ SequenceClassifierOutput,\\n+ )\\n+\\n+\\n+if is_megatron_lm_available():\\n+ from megatron import (\\n+ get_args,\\n+ get_num_microbatches,\\n+ get_tensorboard_writer,\\n+ get_timers,\\n+ mpu,\\n+ print_rank_0,\\n+ print_rank_last,\\n+ )\\n+ from megatron.arguments import _add_data_args, _add_validation_args, parse_args, validate_args\\n+ from megatron.checkpointing import load_args_from_checkpoint, load_checkpoint, save_checkpoint\\n+ from megatron.data.data_samplers import MegatronPretrainingRandomSampler, MegatronPretrainingSampler\\n+ from megatron.global_vars import set_global_variables\\n+ from megatron.initialize import (\\n+ _compile_dependencies,\\n+ _init_autoresume,\\n+ _set_random_seed,\\n+ set_jit_fusion_options,\\n+ write_args_to_tensorboard,\\n+ )\\n+ from megatron.model import BertModel, GPTModel, ModelType, T5Model\\n+ from megatron.model.classification import Classification\\n+ from megatron.optimizer import get_megatron_optimizer\\n+ from megatron.schedules import get_forward_backward_func\\n+ from megatron.tokenizer.tokenizer import _vocab_size_with_padding\\n+ from megatron.training import get_model, get_optimizer_param_scheduler, training_log\\n+ from megatron.utils import (\\n+ average_losses_across_data_parallel_group,\\n+ calc_params_l2_norm,\\n+ get_ltor_masks_and_position_ids,\\n+ )\\n+\\n+\\n+# model utilities\\n+def model_provider_func(pre_process=True, post_process=True, add_encoder=True, add_decoder=True):\\n+ \"\"\"Build the model.\"\"\"\\n+ args = get_args()\\n+ mode = \"pre-training\" if args.pretraining_flag else \"fine-tuning\"\\n+ if args.rank == 0:\\n+ print(f\"Building {args.model_type_name} model in the {mode} mode.\")\\n+ print(\\n+ \"The Megatron LM model weights are initialized at random in `accelerator.prepare`. 
\"\\n+ \"Please use `accelerator.load_checkpoint` to load a pre-trained checkpoint matching the distributed setup.\"\\n+ )\\n+ if args.model_type_name == \"bert\":\\n+ if args.pretraining_flag:\\n+ num_tokentypes = 2 if args.bert_binary_head else 0\\n+ model = BertModel(\\n+ num_tokentypes=num_tokentypes,\\n+ add_binary_head=args.bert_binary_head,\\n+ parallel_output=True,\\n+ pre_process=pre_process,\\n+ post_process=post_process,\\n+ )\\n+ else:\\n+\\n+ model = Classification(\\n+ num_classes=args.num_labels, num_tokentypes=2, pre_process=pre_process, post_process=post_process\\n+ )\\n+ elif args.model_type_name == \"gpt\":\\n+ model = GPTModel(num_tokentypes=0, parallel_output=True, pre_process=pre_process, post_process=post_process)\\n+ elif args.model_type_name == \"t5\":\\n+ model = T5Model(\\n+ num_tokentypes=0,\\n+ parallel_output=True,\\n+ pre_process=pre_process,\\n+ post_process=post_process,\\n+ add_encoder=add_encoder,\\n+ add_decoder=add_decoder,\\n+ )\\n+ else:\\n+ raise ValueError(f\"Unsupported model type: {args.model_type_name}\")\\n+ return model\\n+\\n+\\n+def prepare_model(accelerator):\\n+ accelerator.print(\"Preparing model\")\\n+ args = get_args()\\n+ if args.model_type_name == \"bert\" or args.model_type_name == \"gpt\":\\n+ model_type = ModelType.encoder_or_decoder\\n+ elif args.model_type_name == \"t5\":\\n+ model_type = ModelType.encoder_and_decoder\\n+ if args.pipeline_model_parallel_split_rank is None and args.pipeline_model_parallel_size > 1:\\n+ args.pipeline_model_parallel_split_rank = args.pipeline_model_parallel_size // 2\\n+ model = get_model(model_provider_func, model_type)\\n+ return model\\n+\\n+\\n+# dataloader utilities\\n+class MegatronLMDummyDataLoader:\\n+ \"\"\"\\n+ Dummy dataloader presents model parameters or param groups, this is primarily used to follow conventional training\\n+\\n+ Args:\\n+ **dataset_kwargs: Megatron data arguments.\\n+ \"\"\"\\n+\\n+ def __init__(self, **dataset_kwargs):\\n+ parser = argparse.ArgumentParser()\\n+ parser = _add_data_args(parser)\\n+ parser = _add_validation_args(parser)\\n+ data_args = parser.parse_known_args()\\n+ self.dataset_args = vars(data_args[0])\\n+ self.dataset_args.update(dataset_kwargs)\\n+ self.dataset_args[\"megatron_dataset_flag\"] = True\\n+\\n+ def set_megatron_data_args(self):\\n+ args = get_args()\\n+ for key, value in self.dataset_args.items():\\n+ setattr(args, key, value)\\n+\\n+ def get_train_valid_test_datasets_provider(self):\\n+ def train_valid_test_datasets_provider(train_val_test_num_samples):\\n+ \"\"\"Build train, valid, and test datasets.\"\"\"\\n+ args = get_args()\\n+ dataset_args = {\\n+ \"data_prefix\": args.data_path,\\n+ \"data_impl\": args.data_impl,\\n+ \"splits_string\": args.split,\\n+ \"train_valid_test_num_samples\": train_val_test_num_samples,\\n+ \"skip_warmup\": (not args.mmap_warmup),\\n+ \"seed\": args.seed,\\n+ }\\n+ if args.model_type_name == \"bert\":\\n+ dataset_args.update(\\n+ {\\n+ \"max_seq_length\": args.seq_length,\\n+ \"masked_lm_prob\": args.mask_prob,\\n+ \"short_seq_prob\": args.short_seq_prob,\\n+ \"binary_head\": args.bert_binary_head,\\n+ }\\n+ )\\n+ elif args.model_type_name == \"gpt\":\\n+ dataset_args.update(\\n+ {\\n+ \"seq_length\": args.seq_length,\\n+ }\\n+ )\\n+ elif args.model_type_name == \"t5\":\\n+ dataset_args.update(\\n+ {\\n+ \"max_seq_length\": args.encoder_seq_length,\\n+ \"max_seq_length_dec\": args.decoder_seq_length,\\n+ \"masked_lm_prob\": args.mask_prob,\\n+ \"short_seq_prob\": args.short_seq_prob,\\n+ \"dataset_type\": 
\"t5\",\\n+ }\\n+ )\\n+ else:\\n+ raise ValueError(f\"Unsupported model type: {args.model_type_name}\")\\n+ if args.model_type_name == \"gpt\":\\n+ from megatron.data.gpt_dataset import build_train_valid_test_datasets\\n+ else:\\n+ from megatron.data.dataset_utils import build_train_valid_test_datasets\\n+ train_ds, valid_ds, test_ds = build_train_valid_test_datasets(**dataset_args)\\n+ return train_ds, valid_ds, test_ds\\n+\\n+ return train_valid_test_datasets_provider\\n+\\n+ def build_pretraining_data_loader(self, dataset, consumed_samples):\\n+ if dataset is None:\\n+ return None\\n+ args = get_args()\\n+ micro_batch_size = args.micro_batch_size * args.num_micro_batches\\n+\\n+ # Megatron sampler\\n+ if args.dataloader_type == \"single\":\\n+ batch_sampler = MegatronPretrainingSampler(\\n+ total_samples=len(dataset),\\n+ consumed_samples=consumed_samples,\\n+ micro_batch_size=micro_batch_size,\\n+ data_parallel_rank=mpu.get_data_parallel_rank(),\\n+ data_parallel_size=mpu.get_data_parallel_world_size(),\\n+ )\\n+ elif args.dataloader_type == \"cyclic\":\\n+ batch_sampler = MegatronPretrainingRandomSampler(\\n+ dataset,\\n+ total_samples=len(dataset),\\n+ consumed_samples=consumed_samples,\\n+ micro_batch_size=micro_batch_size,\\n+ data_parallel_rank=mpu.get_data_parallel_rank(),\\n+ data_parallel_size=mpu.get_data_parallel_world_size(),\\n+ data_sharding=args.data_sharding,\\n+ )\\n+ else:\\n+ raise Exception(\"{} dataloader type is not supported.\".format(args.dataloader_type))\\n+\\n+ # Torch dataloader.\\n+ return torch.utils.data.DataLoader(\\n+ dataset, batch_sampler=batch_sampler, num_workers=args.num_workers, pin_memory=True\\n+ )\\n+\\n+ def build_train_valid_test_data_iterators(self):\\n+ def cyclic_iter(iter):\\n+ while True:\\n+ for x in iter:\\n+ yield x\\n+\\n+ args = get_args()\\n+\\n+ (train_dataloader, valid_dataloader, test_dataloader) = (None, None, None)\\n+\\n+ print_rank_0(\"> building train, validation, and test datasets ...\")\\n+\\n+ # Backward compatibility, assume fixed batch size.\\n+ if args.iteration > 0 and args.consumed_train_samples == 0:\\n+ assert args.train_samples is None, \"only backward compatiblity support for iteration-based training\"\\n+ args.consumed_train_samples = args.iteration * args.global_batch_size\\n+ if args.iteration > 0 and args.consumed_valid_samples == 0:\\n+ if args.train_samples is None:\\n+ args.consumed_valid_samples = (\\n+ (args.iteration // args.eval_interval) * args.eval_iters * args.global_batch_size\\n+ )\\n+\\n+ # Data loader only on rank 0 of each model parallel group.\\n+ if mpu.get_tensor_model_parallel_rank() == 0:\\n+\\n+ # Number of train/valid/test samples.\\n+ if args.train_samples:\\n+ train_samples = args.train_samples\\n+ else:\\n+ train_samples = args.train_iters * args.global_batch_size\\n+ eval_iters = (args.train_iters // args.eval_interval + 1) * args.eval_iters\\n+ test_iters = args.eval_iters\\n+ train_val_test_num_samples = [\\n+ train_samples,\\n+ eval_iters * args.global_batch_size,\\n+ test_iters * args.global_batch_size,\\n+ ]\\n+ print_rank_0(\" > datasets target sizes (minimum size):\")\\n+ print_rank_0(\" train: {}\".format(train_val_test_num_samples[0]))\\n+ print_rank_0(\" validation: {}\".format(train_val_test_num_samples[1]))\\n+ print_rank_0(\" test: {}\".format(train_val_test_num_samples[2]))\\n+\\n+ # Build the datasets.\\n+ train_valid_test_datasets_provider = self.get_train_valid_test_datasets_provider()\\n+ train_ds, valid_ds, test_ds = 
train_valid_test_datasets_provider(train_val_test_num_samples)\\n+\\n+ # Build dataloders.\\n+ train_dataloader = self.build_pretraining_data_loader(train_ds, args.consumed_train_samples)\\n+ valid_dataloader = self.build_pretraining_data_loader(valid_ds, args.consumed_valid_samples)\\n+ test_dataloader = self.build_pretraining_data_loader(test_ds, 0)\\n+\\n+ # Flags to know if we need to do training/validation/testing.\\n+ do_train = train_dataloader is not None and args.train_iters > 0\\n+ do_valid = valid_dataloader is not None and args.eval_iters > 0\\n+ do_test = test_dataloader is not None and args.eval_iters > 0\\n+ # Need to broadcast num_tokens and num_type_tokens.\\n+ flags = torch.cuda.LongTensor([int(do_train), int(do_valid), int(do_test)])\\n+ else:\\n+ flags = torch.cuda.LongTensor([0, 0, 0])\\n+\\n+ # Broadcast num tokens.\\n+ torch.distributed.broadcast(\\n+ flags, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()\\n+ )\\n+ args.do_train = flags[0].item()\\n+ args.do_valid = flags[1].item()\\n+ args.do_test = flags[2].item()\\n+\\n+ # Build iterators.\\n+ dl_type = args.dataloader_type\\n+ assert dl_type in [\"single\", \"cyclic\"]\\n+\\n+ if train_dataloader is not None:\\n+ train_data_iterator = (\\n+ iter(train_dataloader) if dl_type == \"single\" else iter(cyclic_iter(train_dataloader))\\n+ )\\n+ else:\\n+ train_data_iterator = None\\n+\\n+ if valid_dataloader is not None:\\n+ valid_data_iterator = (\\n+ iter(valid_dataloader) if dl_type == \"single\" else iter(cyclic_iter(valid_dataloader))\\n+ )\\n+ else:\\n+ valid_data_iterator = None\\n+\\n+ if test_dataloader is not None:\\n+ test_data_iterator = iter(test_dataloader) if dl_type == \"single\" else iter(cyclic_iter(test_dataloader))\\n+ else:\\n+ test_data_iterator = None\\n+\\n+ return train_data_iterator, valid_data_iterator, test_data_iterator\\n+\\n+\\n+def prepare_data_loader(accelerator, dataloader):\\n+ accelerator.print(\"Preparing dataloader\")\\n+ args = get_args()\\n+ if not args.megatron_dataset_flag:\\n+ from ..data_loader import _PYTORCH_DATALOADER_KWARGS, prepare_data_loader\\n+\\n+ args = get_args()\\n+ micro_batch_size = args.micro_batch_size * args.num_micro_batches\\n+ kwargs = {k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k]) for k in _PYTORCH_DATALOADER_KWARGS}\\n+ if kwargs[\"batch_size\"] is None:\\n+ if isinstance(kwargs[\"sampler\"], torch.utils.data.BatchSampler):\\n+ kwargs[\"sampler\"].batch_size = micro_batch_size\\n+ else:\\n+ del kwargs[\"sampler\"]\\n+ del kwargs[\"shuffle\"]\\n+ del kwargs[\"batch_size\"]\\n+ kwargs[\"batch_sampler\"].batch_size = micro_batch_size\\n+ else:\\n+ del kwargs[\"batch_sampler\"]\\n+ kwargs[\"batch_size\"] = micro_batch_size\\n+\\n+ dataloader = torch.utils.data.DataLoader(dataloader.dataset, **kwargs)\\n+ return prepare_data_loader(\\n+ dataloader,\\n+ accelerator.device,\\n+ num_processes=mpu.get_data_parallel_world_size(),\\n+ process_index=mpu.get_data_parallel_rank(),\\n+ split_batches=accelerator.split_batches,\\n+ put_on_device=True,\\n+ rng_types=accelerator.rng_types.copy(),\\n+ dispatch_batches=accelerator.dispatch_batches,\\n+ )\\n+ else:\\n+ if args.consumed_samples is not None:\\n+ (\\n+ args.consumed_train_samples,\\n+ args.consumed_valid_samples,\\n+ args.consumed_test_samples,\\n+ ) = args.consumed_samples\\n+ else:\\n+ args.consumed_train_samples, args.consumed_valid_samples, args.consumed_test_samples = 0, 0, 0\\n+ (\\n+ train_data_iterator,\\n+ valid_data_iterator,\\n+ test_data_iterator,\\n+ ) 
= dataloader.build_train_valid_test_data_iterators()\\n+ return train_data_iterator, valid_data_iterator, test_data_iterator\\n+\\n+\\n+# optimizer utilities\\n+class MegatronLMOptimizerWrapper(AcceleratedOptimizer):\\n+ def __init__(self, optimizer):\\n+ super().__init__(optimizer, device_placement=False, scaler=None)\\n+\\n+ def zero_grad(self, set_to_none=None):\\n+ pass # `model(**batch)` is doing that automatically. Therefore, it\\'s implementation is not needed\\n+\\n+ def step(self):\\n+ pass # `model(**batch)` is doing that automatically. Therefore, it\\'s implementation is not needed\\n+\\n+ @property\\n+ def step_was_skipped(self):\\n+ \"\"\"Whether or not the optimizer step was done, or skipped because of gradient overflow.\"\"\"\\n+ return self.optimizer.skipped_iter\\n+\\n+\\n+def prepare_optimizer(accelerator, model):\\n+ accelerator.print(\"Preparing optimizer\")\\n+ args = get_args()\\n+ optimizer = get_megatron_optimizer(model, args.no_wd_decay_cond, args.scale_lr_cond, args.lr_mult)\\n+ return optimizer\\n+\\n+\\n+# scheduler utilities\\n+class MegatronLMDummyScheduler:\\n+ \"\"\"\\n+ Dummy scheduler presents model parameters or param groups, this is primarily used to follow conventional training\\n+ loop when scheduler config is specified in the deepspeed config file.\\n+\\n+ Args:\\n+ optimizer (`torch.optim.optimizer.Optimizer`):\\n+ The optimizer to wrap.\\n+ total_num_steps (int):\\n+ Total number of steps.\\n+ warmup_num_steps (int):\\n+ Number of steps for warmup.\\n+ **kwargs:\\n+ Other arguments.\\n+ \"\"\"\\n+\\n+ def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):\\n+ self.optimizer = optimizer\\n+ self.total_num_steps = total_num_steps\\n+ self.warmup_num_steps = warmup_num_steps\\n+ self.kwargs = kwargs\\n+\\n+\\n+class MegatronLMSchedulerWrapper(AcceleratedScheduler):\\n+ def __init__(self, scheduler, optimizers):\\n+ super().__init__(scheduler, optimizers)\\n+\\n+ def step(self, *args, **kwargs):\\n+ return # `model(**batch)` is doing that automatically. 
Therefore, it\\'s implementation is not needed\\n+\\n+\\n+def prepare_scheduler(accelerator, optimizer, scheduler):\\n+ accelerator.print(\"Preparing scheduler\")\\n+ scheduler = get_optimizer_param_scheduler(optimizer)\\n+ return scheduler\\n+\\n+\\n+class AbstractTrainStep(ABC):\\n+ \"\"\"Abstract class for batching, forward pass and loss handler.\"\"\"\\n+\\n+ def __init__(self, name):\\n+ super().__init__()\\n+ self.name = name\\n+\\n+ def get_batch_func(self):\\n+ pass\\n+\\n+ def get_forward_step_func(self):\\n+ pass\\n+\\n+ def get_loss_func(self):\\n+ pass\\n+\\n+\\n+class BertTrainStep(AbstractTrainStep):\\n+ \"\"\"\\n+ Bert train step class.\\n+\\n+ Args:\\n+ args (`argparse.Namespace`): Megatron-LM arguments.\\n+ \"\"\"\\n+\\n+ def __init__(self, args):\\n+ super().__init__(\"BertTrainStep\")\\n+ self.get_batch = self.get_batch_func(args.megatron_dataset_flag)\\n+ self.loss_func = self.get_loss_func(args.pretraining_flag, args.num_labels)\\n+ self.forward_step = self.get_forward_step_func(args.pretraining_flag, args.bert_binary_head)\\n+ if not args.model_return_dict:\\n+ self.model_output_class = None\\n+ else:\\n+ self.model_output_class = SequenceClassifierOutput\\n+\\n+ def get_batch_func(self, megatron_dataset_flag):\\n+ def get_batch_megatron(data_iterator):\\n+ \"\"\"Build the batch.\"\"\"\\n+\\n+ # Items and their type.\\n+ keys = [\"text\", \"types\", \"labels\", \"is_random\", \"loss_mask\", \"padding_mask\"]\\n+ datatype = torch.int64\\n+\\n+ # Broadcast data.\\n+ if data_iterator is not None:\\n+ data = next(data_iterator)\\n+ else:\\n+ data = None\\n+ data_b = mpu.broadcast_data(keys, data, datatype)\\n+\\n+ # Unpack.\\n+ tokens = data_b[\"text\"].long()\\n+ types = data_b[\"types\"].long()\\n+ sentence_order = data_b[\"is_random\"].long()\\n+ loss_mask = data_b[\"loss_mask\"].float()\\n+ lm_labels = data_b[\"labels\"].long()\\n+ padding_mask = data_b[\"padding_mask\"].long()\\n+\\n+ return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask\\n+\\n+ def get_batch_transformer(data_iterator):\\n+ \"\"\"Build the batch.\"\"\"\\n+ data = next(data_iterator)\\n+ data = send_to_device(data, torch.cuda.current_device())\\n+\\n+ # Unpack.\\n+ tokens = data[\"input_ids\"].long()\\n+ padding_mask = data[\"attention_mask\"].long()\\n+ if \"token_type_ids\" in data:\\n+ types = data[\"token_type_ids\"].long()\\n+ else:\\n+ types = None\\n+ if \"labels\" in data:\\n+ lm_labels = data[\"labels\"].long()\\n+ loss_mask = (data[\"labels\"] != -100).to(torch.float)\\n+ else:\\n+ lm_labels = None\\n+ loss_mask = None\\n+ if \"next_sentence_label\" in data:\\n+ sentence_order = data[\"next_sentence_label\"].long()\\n+ else:\\n+ sentence_order = None\\n+\\n+ return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask\\n+\\n+ if megatron_dataset_flag:\\n+ return get_batch_megatron\\n+ else:\\n+ return get_batch_transformer\\n+\\n+ def get_loss_func(self, pretraining_flag, num_labels):\\n+ def loss_func_pretrain(loss_mask, sentence_order, output_tensor):\\n+ lm_loss_, sop_logits = output_tensor\\n+\\n+ lm_loss_ = lm_loss_.float()\\n+ loss_mask = loss_mask.float()\\n+ lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()\\n+\\n+ if sop_logits is not None:\\n+ sop_loss = F.cross_entropy(sop_logits.view(-1, 2).float(), sentence_order.view(-1), ignore_index=-1)\\n+ sop_loss = sop_loss.float()\\n+ loss = lm_loss + sop_loss\\n+ averaged_losses = average_losses_across_data_parallel_group([lm_loss, sop_loss])\\n+ return loss, {\"lm loss\": 
averaged_losses[0], \"sop loss\": averaged_losses[1]}\\n+\\n+ else:\\n+ loss = lm_loss\\n+ averaged_losses = average_losses_across_data_parallel_group([lm_loss])\\n+ return loss, {\"lm loss\": averaged_losses[0]}\\n+\\n+ def loss_func_finetune(labels, logits):\\n+ if num_labels == 1:\\n+ # We are doing regression\\n+ loss_fct = MSELoss()\\n+ loss = loss_fct(logits.view(-1), labels.view(-1))\\n+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\\n+ loss_fct = CrossEntropyLoss()\\n+ loss = loss_fct(logits.view(-1, num_labels), labels.view(-1))\\n+ else:\\n+ loss_fct = BCEWithLogitsLoss()\\n+ loss = loss_fct(logits, labels)\\n+ averaged_losses = average_losses_across_data_parallel_group([loss])\\n+ return loss, {\"loss\": averaged_losses[0]}\\n+\\n+ if pretraining_flag:\\n+ return loss_func_pretrain\\n+ else:\\n+ return loss_func_finetune\\n+\\n+ def get_forward_step_func(self, pretraining_flag, bert_binary_head):\\n+ def forward_step(data_iterator, model):\\n+ \"\"\"Forward step.\"\"\"\\n+ tokens, types, sentence_order, loss_mask, labels, padding_mask = self.get_batch(data_iterator)\\n+ if not bert_binary_head:\\n+ types = None\\n+ # Forward pass through the model.\\n+ if pretraining_flag:\\n+ output_tensor = model(tokens, padding_mask, tokentype_ids=types, lm_labels=labels)\\n+ return output_tensor, partial(self.loss_func, loss_mask, sentence_order)\\n+ else:\\n+ logits = model(tokens, padding_mask, tokentype_ids=types)\\n+ return logits, partial(self.loss_func, labels)\\n+\\n+ return forward_step\\n+\\n+\\n+class GPTTrainStep(AbstractTrainStep):\\n+ \"\"\"\\n+ GPT train step class.\\n+\\n+ Args:\\n+ args (`argparse.Namespace`): Megatron-LM arguments.\\n+ \"\"\"\\n+\\n+ def __init__(self, args):\\n+ super().__init__(\"GPTTrainStep\")\\n+ self.get_batch = self.get_batch_func(args.megatron_dataset_flag)\\n+ self.loss_func = self.get_loss_func()\\n+ self.forward_step = self.get_forward_step_func()\\n+ self.eod_token = args.padded_vocab_size - 1\\n+ if not args.model_return_dict:\\n+ self.model_output_class = None\\n+ else:\\n+ self.model_output_class = CausalLMOutputWithCrossAttentions\\n+\\n+ def get_batch_func(self, megatron_dataset_flag):\\n+ def get_batch_megatron(data_iterator):\\n+ \"\"\"Generate a batch\"\"\"\\n+ args = get_args()\\n+\\n+ # Items and their type.\\n+ keys = [\"text\"]\\n+ datatype = torch.int64\\n+\\n+ # Broadcast data.\\n+ if data_iterator is not None:\\n+ data = next(data_iterator)\\n+ else:\\n+ data = None\\n+ data_b = mpu.broadcast_data(keys, data, datatype)\\n+\\n+ # Unpack.\\n+ tokens_ = data_b[\"text\"].long()\\n+ labels = tokens_[:, 1:].contiguous()\\n+ tokens = tokens_[:, :-1].contiguous()\\n+\\n+ # Get the masks and postition ids.\\n+ attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(\\n+ tokens, self.eod_token, args.reset_position_ids, args.reset_attention_mask, args.eod_mask_loss\\n+ )\\n+\\n+ return tokens, labels, loss_mask, attention_mask, position_ids\\n+\\n+ def get_batch_transformer(data_iterator):\\n+ data = next(data_iterator)\\n+ data = {\"input_ids\": data[\"input_ids\"]}\\n+ data = send_to_device(data, torch.cuda.current_device())\\n+\\n+ tokens_ = data[\"input_ids\"].long()\\n+ padding = torch.zeros((tokens_.shape[0], 1), dtype=tokens_.dtype, device=tokens_.device) + self.eod_token\\n+ tokens_ = torch.concat([tokens_, padding], dim=1)\\n+ labels = tokens_[:, 1:].contiguous()\\n+ tokens = tokens_[:, :-1].contiguous()\\n+ # Get the masks and postition ids.\\n+ attention_mask, loss_mask, 
position_ids = get_ltor_masks_and_position_ids(\\n+ tokens, self.eod_token, False, False, True\\n+ )\\n+ return tokens, labels, loss_mask, attention_mask, position_ids\\n+\\n+ if megatron_dataset_flag:\\n+ return get_batch_megatron\\n+ else:\\n+ return get_batch_transformer\\n+\\n+ def get_loss_func(self):\\n+ def loss_func(loss_mask, output_tensor):\\n+ losses = output_tensor.float()\\n+ loss_mask = loss_mask.view(-1).float()\\n+ loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()\\n+\\n+ # Reduce loss for logging.\\n+ averaged_loss = average_losses_across_data_parallel_group([loss])\\n+\\n+ return loss, {\"lm loss\": averaged_loss[0]}\\n+\\n+ return loss_func\\n+\\n+ def get_forward_step_func(self):\\n+ def forward_step(data_iterator, model):\\n+ \"\"\"Forward step.\"\"\"\\n+ # Get the batch.\\n+ tokens, labels, loss_mask, attention_mask, position_ids = self.get_batch(data_iterator)\\n+ output_tensor = model(tokens, position_ids, attention_mask, labels=labels)\\n+\\n+ return output_tensor, partial(self.loss_func, loss_mask)\\n+\\n+ return forward_step\\n+\\n+\\n+class T5TrainStep(AbstractTrainStep):\\n+ \"\"\"\\n+ T5 train step class.\\n+\\n+ Args:\\n+ args (`argparse.Namespace`): Megatron-LM arguments.\\n+ \"\"\"\\n+\\n+ def __init__(self, args):\\n+ super().__init__(\"T5TrainStep\")\\n+ self.get_batch = self.get_batch_func(args.megatron_dataset_flag)\\n+ self.loss_func = self.get_loss_func()\\n+ self.forward_step = self.get_forward_step_func()\\n+ if not args.model_return_dict:\\n+ self.model_output_class = None\\n+ else:\\n+ self.model_output_class = Seq2SeqLMOutput\\n+\\n+ @staticmethod\\n+ def attn_mask_postprocess(attention_mask):\\n+ # We create a 3D attention mask from a 2D tensor mask.\\n+ # [b, 1, s]\\n+ attention_mask_b1s = attention_mask.unsqueeze(1)\\n+ # [b, s, 1]\\n+ attention_mask_bs1 = attention_mask.unsqueeze(2)\\n+ # [b, s, s]\\n+ attention_mask_bss = attention_mask_b1s * attention_mask_bs1\\n+ # Convert attention mask to binary:\\n+ extended_attention_mask = attention_mask_bss < 0.5\\n+ return extended_attention_mask\\n+\\n+ @staticmethod\\n+ def get_decoder_mask(seq_length, device):\\n+ attention_mask = torch.tril(torch.ones((1, seq_length, seq_length), device=device))\\n+ attention_mask = attention_mask < 0.5\\n+ return attention_mask\\n+\\n+ @staticmethod\\n+ def get_enc_dec_mask(attention_mask, dec_seq_length, device):\\n+ batch_size, _ = attention_mask.shape\\n+ # We create a 3D attention mask from a 2D tensor mask.\\n+ # [b, 1, s]\\n+ attention_mask_b1s = attention_mask.unsqueeze(1)\\n+ # [b, s, 1]\\n+ attention_mask_bs1 = torch.ones((batch_size, dec_seq_length, 1), device=device)\\n+ attention_mask_bss = attention_mask_bs1 * attention_mask_b1s\\n+ extended_attention_mask = attention_mask_bss < 0.5\\n+ return extended_attention_mask\\n+\\n+ def get_batch_func(self, megatron_dataset_flag):\\n+ def get_batch_megatron(data_iterator):\\n+ \"\"\"Build the batch.\"\"\"\\n+\\n+ keys = [\"text_enc\", \"text_dec\", \"labels\", \"loss_mask\", \"enc_mask\", \"dec_mask\", \"enc_dec_mask\"]\\n+ datatype = torch.int64\\n+\\n+ # Broadcast data.\\n+ if data_iterator is not None:\\n+ data = next(data_iterator)\\n+ else:\\n+ data = None\\n+ data_b = mpu.broadcast_data(keys, data, datatype)\\n+\\n+ # Unpack.\\n+ tokens_enc = data_b[\"text_enc\"].long()\\n+ tokens_dec = data_b[\"text_dec\"].long()\\n+ labels = data_b[\"labels\"].long()\\n+ loss_mask = data_b[\"loss_mask\"].float()\\n+\\n+ enc_mask = data_b[\"enc_mask\"] < 0.5\\n+ dec_mask = data_b[\"dec_mask\"] < 
0.5\\n+ enc_dec_mask = data_b[\"enc_dec_mask\"] < 0.5\\n+\\n+ return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask, enc_dec_mask\\n+\\n+ def get_batch_transformer(data_iterator):\\n+ \"\"\"Build the batch.\"\"\"\\n+ data = next(data_iterator)\\n+ data = send_to_device(data, torch.cuda.current_device())\\n+\\n+ tokens_enc = data[\"input_ids\"].long()\\n+ labels = data[\"labels\"].long()\\n+ loss_mask = (labels != -100).to(torch.float)\\n+ if \"decoder_input_ids\" in data:\\n+ tokens_dec = data[\"decoder_input_ids\"].long()\\n+ else:\\n+ tokens_dec = labels.new_zeros(labels.shape, device=labels.device, dtype=torch.long)\\n+ tokens_dec[..., 1:] = labels[..., :-1].clone()\\n+ tokens_dec[..., 0] = 0\\n+ tokens_dec.masked_fill_(tokens_dec == -100, 0)\\n+ enc_mask = T5TrainStep.attn_mask_postprocess(data[\"attention_mask\"].long())\\n+ dec_mask = T5TrainStep.get_decoder_mask(tokens_dec.shape[1], tokens_dec.device)\\n+ enc_dec_mask = T5TrainStep.get_enc_dec_mask(\\n+ data[\"attention_mask\"].long(), tokens_dec.shape[1], tokens_dec.device\\n+ )\\n+\\n+ return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask, enc_dec_mask\\n+\\n+ if megatron_dataset_flag:\\n+ return get_batch_megatron\\n+ else:\\n+ return get_batch_transformer\\n+\\n+ def get_loss_func(self):\\n+ def loss_func(loss_mask, output_tensor):\\n+ lm_loss_ = output_tensor.float()\\n+ lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()\\n+\\n+ loss = lm_loss\\n+ averaged_losses = average_losses_across_data_parallel_group([lm_loss])\\n+\\n+ return loss, {\"lm loss\": averaged_losses[0]}\\n+\\n+ return loss_func\\n+\\n+ def get_forward_step_func(self):\\n+ def forward_step(data_iterator, model):\\n+ \"\"\"Forward step.\"\"\"\\n+ # Get the batch.\\n+ tokens_enc, tokens_dec, loss_mask, lm_labels, enc_mask, dec_mask, enc_dec_mask = self.get_batch(\\n+ data_iterator\\n+ )\\n+ # Forward model lm_labels\\n+ output_tensor = model(\\n+ tokens_enc, tokens_dec, enc_mask, dec_mask, enc_dec_mask, tokentype_ids=None, lm_labels=lm_labels\\n+ )\\n+\\n+ return output_tensor, partial(self.loss_func, loss_mask)\\n+\\n+ return forward_step\\n+\\n+\\n+# intialize megatron setup\\n+def initialize(accelerator, extra_args_provider=None, args_defaults={}):\\n+ accelerator.print(\"Initializing Megatron-LM\")\\n+ assert torch.cuda.is_available(), \"Megatron requires CUDA.\"\\n+\\n+ # Parse arguments\\n+ args = parse_args(extra_args_provider, ignore_unknown_args=True)\\n+\\n+ # Set defaults\\n+ for key, value in args_defaults.items():\\n+ if getattr(args, key, None) is not None:\\n+ if args.rank == 0:\\n+ print(\\n+ \"WARNING: overriding default arguments for {key}:{v} \\\\\\n+ with {key}:{v2}\".format(\\n+ key=key, v=getattr(args, key), v2=value\\n+ ),\\n+ flush=True,\\n+ )\\n+ setattr(args, key, value)\\n+\\n+ if args.use_checkpoint_args or args_defaults.get(\"use_checkpoint_args\", False):\\n+ assert args.load is not None, \"--use-checkpoints-args requires --load argument\"\\n+ load_args_from_checkpoint(args)\\n+\\n+ validate_args(args)\\n+\\n+ # set global args, build tokenizer, and set adlr-autoresume,\\n+ # tensorboard-writer, and timers.\\n+ set_global_variables(args)\\n+\\n+ # torch.distributed initialization\\n+ def finish_mpu_init():\\n+ args = get_args()\\n+ # Pytorch distributed.\\n+ device_count = torch.cuda.device_count()\\n+ args.rank = torch.distributed.get_rank()\\n+ args.world_size = torch.distributed.get_world_size()\\n+ if device_count > 0:\\n+ device = args.rank % device_count\\n+ if 
args.local_rank is not None:\\n+ assert args.local_rank == device, \"expected local-rank to be the same as rank % device-count.\"\\n+ else:\\n+ args.local_rank = device\\n+\\n+ # Set the tensor model-parallel, pipeline model-parallel, and\\n+ # data-parallel communicators.\\n+ if mpu.model_parallel_is_initialized():\\n+ print(\"model parallel is already initialized\")\\n+ else:\\n+ mpu.initialize_model_parallel(\\n+ args.tensor_model_parallel_size,\\n+ args.pipeline_model_parallel_size,\\n+ args.virtual_pipeline_model_parallel_size,\\n+ args.pipeline_model_parallel_split_rank,\\n+ )\\n+\\n+ # Random seeds for reproducibility.\\n+ if args.rank == 0:\\n+ print(\"> setting random seeds to {} ...\".format(args.seed))\\n+ _set_random_seed(args.seed, args.data_parallel_random_init)\\n+\\n+ args = get_args()\\n+\\n+ # Megatron\\'s MPU is the master. Complete initialization right away.\\n+ finish_mpu_init()\\n+\\n+ # Autoresume.\\n+ _init_autoresume()\\n+\\n+ # Compile dependencies.\\n+ _compile_dependencies()\\n+\\n+ # Set pytorch JIT layer fusion options and warmup JIT functions.\\n+ set_jit_fusion_options()\\n+ args = get_args()\\n+ args.padded_vocab_size = _vocab_size_with_padding(args.orig_vocab_size, args)\\n+ if args.model_type_name == \"bert\" and args.pretraining_flag and args.num_labels == 2:\\n+ args.bert_binary_head = True\\n+ else:\\n+ args.bert_binary_head = False\\n+ args.iteration = 0\\n+\\n+\\n+class MegatronEngine(torch.nn.Module):\\n+ \"\"\"\\n+ Megatron-LM model wrapper\\n+\\n+ Args:\\n+ accelerator (:class:`~accelerate.Accelerator`): The accelerator object to use.\\n+ model: Megatron-LM model\\n+ optimizer: Megatron-LM optimizer\\n+ lr_scheduler: Megatron-LM lr scheduler\\n+ \"\"\"\\n+\\n+ def __init__(self, accelerator, model, optimizer, scheduler):\\n+ super(MegatronEngine, self).__init__()\\n+ self.module = model\\n+ self.base_model = model[0]\\n+ self.optimizer = optimizer\\n+ self.scheduler = scheduler\\n+ args = get_args()\\n+ if accelerator.state.megatron_lm_plugin.custom_train_step_class is not None:\\n+ self.train_step_handler = accelerator.state.megatron_lm_plugin.custom_train_step_class(\\n+ args, **accelerator.state.megatron_lm_plugin.custom_train_step_kwargs\\n+ )\\n+ elif args.model_type_name == \"bert\":\\n+ self.train_step_handler = BertTrainStep(args)\\n+ elif args.model_type_name == \"gpt\":\\n+ self.train_step_handler = GPTTrainStep(args)\\n+ elif args.model_type_name == \"t5\":\\n+ self.train_step_handler = T5TrainStep(args)\\n+ else:\\n+ raise ValueError(f\"Unsupported model type: {args.model_type_name}\")\\n+ self.optimizer.skipped_iter = False\\n+\\n+ # Tracking loss.\\n+ self.total_loss_dict = {}\\n+ self.eval_total_loss_dict = {}\\n+ self.iteration = 0\\n+ self.report_memory_flag = True\\n+ if args.tensorboard_dir is not None:\\n+ write_args_to_tensorboard()\\n+\\n+ def train(self):\\n+ for model_module in self.module:\\n+ model_module.train()\\n+ self.log_eval_results()\\n+\\n+ def eval(self):\\n+ for model_module in self.module:\\n+ model_module.eval()\\n+\\n+ def train_step(self, **batch_data):\\n+ \"\"\"\\n+ Training step for Megatron-LM\\n+\\n+ Args:\\n+ batch_data (:obj:`dict`): The batch data to train on.\\n+ \"\"\"\\n+\\n+ args = get_args()\\n+ timers = get_timers()\\n+\\n+ if len(batch_data) > 0:\\n+ data_chunks = []\\n+ if args.num_micro_batches > 1:\\n+ for i in range(0, args.num_micro_batches):\\n+ data_chunks.append(\\n+ {\\n+ k: v[i * args.micro_batch_size : (i + 1) * args.micro_batch_size]\\n+ for k, v in batch_data.items()\\n+ }\\n+ 
)\\n+ else:\\n+ data_chunks = [batch_data]\\n+\\n+ if len(self.module) > 1:\\n+ batch_data_iterator = (\\n+ [iter(data_chunks) for _ in range(len(self.module))]\\n+ if len(batch_data) > 0\\n+ else [None] * len(self.module)\\n+ )\\n+ else:\\n+ batch_data_iterator = iter(data_chunks) if len(batch_data) > 0 else None\\n+\\n+ # Set grad to zero.\\n+ if args.DDP_impl == \"local\" and args.use_contiguous_buffers_in_local_ddp:\\n+ for partition in self.module:\\n+ partition.zero_grad_buffer()\\n+ self.optimizer.zero_grad()\\n+\\n+ # Forward pass.\\n+ forward_backward_func = get_forward_backward_func()\\n+ losses_reduced = forward_backward_func(\\n+ self.train_step_handler.forward_step,\\n+ batch_data_iterator,\\n+ self.module,\\n+ self.optimizer,\\n+ None,\\n+ forward_only=False,\\n+ )\\n+\\n+ # Empty unused memory.\\n+ if args.empty_unused_memory_level >= 1:\\n+ torch.cuda.empty_cache()\\n+\\n+ # Reduce gradients.\\n+ timers(\"backward-reduce-model-grads\").start()\\n+ self.optimizer.reduce_model_grads(args, timers)\\n+ timers(\"backward-reduce-model-grads\").stop()\\n+\\n+ # Update parameters.\\n+ timers(\"optimizer\").start()\\n+ update_successful, grad_norm, num_zeros_in_grad = self.optimizer.step(args, timers)\\n+ timers(\"optimizer\").stop()\\n+\\n+ # Gather params.\\n+ if update_successful:\\n+ timers(\"backward-gather-model-params\").start()\\n+ self.optimizer.gather_model_params(args, timers)\\n+ timers(\"backward-gather-model-params\").stop()\\n+\\n+ # Update learning rate.\\n+ if update_successful:\\n+ if self.scheduler is not None:\\n+ increment = get_num_microbatches() * args.micro_batch_size * args.data_parallel_size\\n+ self.scheduler.step(increment=increment)\\n+ skipped_iter = 0\\n+ else:\\n+ skipped_iter = 1\\n+\\n+ self.optimizer.skipped_iter = not update_successful\\n+\\n+ # Empty unused memory.\\n+ if args.empty_unused_memory_level >= 2:\\n+ torch.cuda.empty_cache()\\n+\\n+ args.consumed_train_samples += (\\n+ mpu.get_data_parallel_world_size() * args.micro_batch_size * get_num_microbatches()\\n+ )\\n+\\n+ if mpu.is_pipeline_last_stage(ignore_virtual=True):\\n+ # Average loss across microbatches.\\n+ loss_reduced = {}\\n+ for key in losses_reduced[0]:\\n+ losses_reduced_for_key = [x[key] for x in losses_reduced]\\n+ loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key)\\n+ return loss_reduced, skipped_iter, grad_norm, num_zeros_in_grad\\n+ return {}, skipped_iter, grad_norm, num_zeros_in_grad\\n+\\n+ def eval_step(self, **batch_data):\\n+ \"\"\"\\n+ Evaluation step for Megatron-LM\\n+\\n+ Args:\\n+ batch_data (:obj:`dict`): The batch data to evaluate on.\\n+ \"\"\"\\n+\\n+ args = get_args()\\n+ data_chunks = []\\n+ if args.num_micro_batches > 1:\\n+ for i in range(0, args.num_micro_batches):\\n+ data_chunks.append(\\n+ {k: v[i * args.micro_batch_size : (i + 1) * args.micro_batch_size] for k, v in batch_data.items()}\\n+ )\\n+ else:\\n+ data_chunks = [batch_data]\\n+\\n+ if len(self.module) > 1:\\n+ batch_data_iterator = [iter(data_chunks) for _ in range(len(self.module))]\\n+ else:\\n+ batch_data_iterator = iter(data_chunks)\\n+ forward_backward_func = get_forward_backward_func()\\n+ loss_dicts = forward_backward_func(\\n+ self.train_step_handler.forward_step,\\n+ batch_data_iterator,\\n+ self.module,\\n+ optimizer=None,\\n+ timers=None,\\n+ forward_only=True,\\n+ )\\n+ # Empty unused memory\\n+ if args.empty_unused_memory_level >= 1:\\n+ torch.cuda.empty_cache()\\n+\\n+ args.consumed_valid_samples += (\\n+ mpu.get_data_parallel_world_size() * 
args.micro_batch_size * get_num_microbatches()\\n+ )\\n+\\n+ if mpu.is_pipeline_last_stage(ignore_virtual=True):\\n+ # Average loss across microbatches.\\n+ loss_reduced = {}\\n+ for key in loss_dicts[0]:\\n+ losses_reduced_for_key = [x[key] for x in loss_dicts]\\n+ loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key)\\n+ return loss_reduced\\n+ else:\\n+ return {}\\n+\\n+ def forward(self, **batch_data):\\n+ # During training, we use train_step()\\n+ # model(**batch_data) performs following operations by delegating it to `self.train_step`:\\n+ # 1. Prepare **batch_data for Tendor, Pipeline and Model Parallelism\\n+ # 2. Set grad to zero.\\n+ # 3. forward pass and backward pass using Pipeline Parallelism\\n+ # 4. Empty unused memory.\\n+ # 5. Reduce gradients.\\n+ # 6. Update parameters.\\n+ # 7. Gather params when using Distributed Optimizer (Data Parallelism).\\n+ # 8. Update learning rate if scheduler is specified.\\n+ # 9. Empty unused memory.\\n+ # 10. Average loss across microbatches and across DP ranks.\\n+ #\\n+ # During evaluation, we use eval_step()\\n+ args = get_args()\\n+ if self.module[0].training:\\n+ loss_dict, skipped_iter, grad_norm, num_zeros_in_grad = self.train_step(**batch_data)\\n+ self.iteration += 1\\n+ if args.tensorboard_dir is not None:\\n+ # Logging.\\n+ loss_scale = self.optimizer.get_loss_scale().item()\\n+ params_norm = None\\n+ if args.log_params_norm:\\n+ params_norm = calc_params_l2_norm(self.model)\\n+ self.report_memory_flag = training_log(\\n+ loss_dict,\\n+ self.total_loss_dict,\\n+ self.optimizer.param_groups[0][\"lr\"],\\n+ self.iteration,\\n+ loss_scale,\\n+ self.report_memory_flag,\\n+ skipped_iter,\\n+ grad_norm,\\n+ params_norm,\\n+ num_zeros_in_grad,\\n+ )\\n+ else:\\n+ loss_dict = self.eval_step(**batch_data)\\n+ if args.tensorboard_dir is not None:\\n+ for key in loss_dict:\\n+ self.eval_total_loss_dict[key] = (\\n+ self.eval_total_loss_dict.get(key, torch.cuda.FloatTensor([0.0])) + loss_dict[key]\\n+ )\\n+ self.eval_total_loss_dict[key + \"_num_iters\"] = self.eval_total_loss_dict.get(\\n+ key + \"_num_iters\", torch.cuda.FloatTensor([0.0])\\n+ ) + torch.cuda.FloatTensor([1.0])\\n+\\n+ loss = torch.tensor(0.0, device=args.local_rank)\\n+ for key in loss_dict:\\n+ loss += loss_dict[key]\\n+ # loss = reduce(loss)\\n+ if self.train_step_handler.model_output_class is not None:\\n+ return self.train_step_handler.model_output_class(loss=loss)\\n+ return loss\\n+\\n+ def log_eval_results(self):\\n+ args = get_args()\\n+ if args.tensorboard_dir is None or self.iteration == 0:\\n+ return\\n+ args = get_args()\\n+ writer = get_tensorboard_writer()\\n+ string = f\"validation loss at iteration {self.iteration} | \"\\n+ for key in self.eval_total_loss_dict:\\n+ if key.endswith(\"_num_iters\"):\\n+ continue\\n+ value = self.eval_total_loss_dict[key] / self.eval_total_loss_dict[key + \"_num_iters\"]\\n+ string += f\"{key} value: {value} | \"\\n+ ppl = math.exp(min(20, value.item()))\\n+ if args.pretraining_flag:\\n+ string += f\"{key} PPL: {ppl} | \"\\n+ if writer:\\n+ writer.add_scalar(f\"{key} validation\", value.item(), self.iteration)\\n+ if args.pretraining_flag:\\n+ writer.add_scalar(f\"{key} validation ppl\", ppl, self.iteration)\\n+\\n+ length = len(string) + 1\\n+ print_rank_last(\"-\" * length)\\n+ print_rank_last(string)\\n+ print_rank_last(\"-\" * length)\\n+ self.eval_total_loss_dict = {}\\n+\\n+ def save_checkpoint(self, output_dir):\\n+ self.log_eval_results()\\n+ args = get_args()\\n+ args.save = output_dir\\n+ 
torch.distributed.barrier()\\n+ save_checkpoint(self.iteration, self.module, self.optimizer, self.scheduler)\\n+ torch.distributed.barrier()\\n+\\n+ def load_checkpoint(self, input_dir):\\n+ args = get_args()\\n+ args.load = input_dir\\n+ args.consumed_train_samples = 0\\n+ args.consumed_valid_samples = 0\\n+ torch.distributed.barrier()\\n+ iteration = load_checkpoint(self.module, self.optimizer, self.scheduler)\\n+ torch.distributed.barrier()\\n+ self.iteration = iteration\\n+ if args.fp16 and self.iteration == 0:\\n+ self.optimizer.reload_model_params()\\n+\\n+\\n+# other utilities\\n+def avg_losses_across_data_parallel_group(losses):\\n+ \"\"\"\\n+ Average losses across data parallel group.\\n+\\n+ Args:\\n+ losses (List[Tensor]): List of losses to average across data parallel group.\\n+ \"\"\"\\n+\\n+ return average_losses_across_data_parallel_group(losses)\\ndiff --git a/src/accelerate/utils/operations.py b/src/accelerate/utils/operations.py\\nindex 3ba46a03f..aef1fd589 100644\\n--- a/src/accelerate/utils/operations.py\\n+++ b/src/accelerate/utils/operations.py\\n@@ -224,6 +224,7 @@ def gather(tensor):\\n DistributedType.DEEPSPEED,\\n DistributedType.MULTI_GPU,\\n DistributedType.FSDP,\\n+ DistributedType.MEGATRON_LM,\\n ]:\\n return _gpu_gather(tensor)\\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\\n@@ -261,6 +262,7 @@ def gather_object(object: Any):\\n DistributedType.DEEPSPEED,\\n DistributedType.MULTI_GPU,\\n DistributedType.FSDP,\\n+ DistributedType.MEGATRON_LM,\\n ]:\\n return _gpu_gather_object(object)\\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\\n@@ -304,6 +306,7 @@ def broadcast(tensor, from_process: int = 0):\\n DistributedType.DEEPSPEED,\\n DistributedType.MULTI_GPU,\\n DistributedType.FSDP,\\n+ DistributedType.MEGATRON_LM,\\n ]:\\n return _gpu_broadcast(tensor, src=from_process)\\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\\n@@ -332,6 +335,7 @@ def broadcast_object_list(object_list, from_process: int = 0):\\n DistributedType.DEEPSPEED,\\n DistributedType.MULTI_GPU,\\n DistributedType.FSDP,\\n+ DistributedType.MEGATRON_LM,\\n ]:\\n torch.distributed.broadcast_object_list(object_list, src=from_process)\\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\\n@@ -452,6 +456,7 @@ def _reduce_across_processes(tensor, reduction=\"mean\"):\\n DistributedType.DEEPSPEED,\\n DistributedType.MULTI_GPU,\\n DistributedType.FSDP,\\n+ DistributedType.MEGATRON_LM,\\n ]:\\n torch.distributed.all_reduce(cloned_tensor, ReduceOp.SUM)\\n return cloned_tensor\\ndiff --git a/src/accelerate/utils/random.py b/src/accelerate/utils/random.py\\nindex e95ed03bf..8dc149a02 100644\\n--- a/src/accelerate/utils/random.py\\n+++ b/src/accelerate/utils/random.py\\n@@ -64,7 +64,12 @@ def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optiona\\n state = AcceleratorState()\\n if state.distributed_type == DistributedType.TPU:\\n rng_state = xm.mesh_reduce(\"random_seed\", rng_state, lambda x: x[0])\\n- elif state.distributed_type in [DistributedType.DEEPSPEED, DistributedType.MULTI_GPU, DistributedType.FSDP]:\\n+ elif state.distributed_type in [\\n+ DistributedType.DEEPSPEED,\\n+ DistributedType.MULTI_GPU,\\n+ DistributedType.FSDP,\\n+ DistributedType.MEGATRON_LM,\\n+ ]:\\n rng_state = rng_state.to(state.device)\\n torch.distributed.broadcast(rng_state, 0)\\n rng_state = rng_state.cpu()\\ndiff --git a/tests/test_examples.py b/tests/test_examples.py\\nindex 9197070c8..05688f2b5 
100644\\n--- a/tests/test_examples.py\\n+++ b/tests/test_examples.py\\n@@ -39,6 +39,7 @@\\n \"automatic_gradient_accumulation.py\",\\n \"fsdp_with_peak_mem_tracking.py\",\\n \"deepspeed_with_config_support.py\",\\n+ \"megatron_lm_gpt_pretraining.py\",\\n ]\\n \\n \\n',\n", - " 'code_comments': [{'body': '```suggestion\\r\\n self.megatron_lm_default_args = {\\r\\n```\\r\\nAlso you use `megatronlm_plugin` with no `_` between `megatron` and `lm`., just flagging this so you stay consistent (I prefer `megatron_lm` personally.',\n", - " 'diff_hunk': '@@ -643,3 +645,142 @@ def load_optimizer(self, accelerator, optimizer, model, input_dir, optimizer_ind\\n # called from all ranks, though only rank0 has a valid param for full_osd\\n sharded_osd = FSDP.scatter_full_optim_state_dict(full_osd, model)\\n optimizer.load_state_dict(sharded_osd)\\n+\\n+\\n+@dataclass\\n+class MegatronLMPlugin:\\n+ \"\"\"\\n+ Plugin for Megatron-LM to enable tensor, pipeline, sequence and data parallelism. Also to enable selective\\n+ activation recomputation and optimized fused kernels.\\n+ \"\"\"\\n+\\n+ tp_degree: int = field(default=None, metadata={\"help\": \"tensor parallelism degree.\"})\\n+ pp_degree: int = field(default=None, metadata={\"help\": \"pipeline parallelism degree.\"})\\n+ num_micro_batches: int = field(default=None, metadata={\"help\": \"number of micro-batches.\"})\\n+ gradient_clipping: float = field(\\n+ default=None, metadata={\"help\": \"gradient clipping value based on global L2 Norm (0 to disable)\"}\\n+ )\\n+ sequence_parallelism: bool = field(\\n+ default=None,\\n+ metadata={\"help\": \"enable sequence parallelism\"},\\n+ )\\n+ recompute_activation: bool = field(\\n+ default=None,\\n+ metadata={\"help\": \"enable selective activation recomputation\"},\\n+ )\\n+ use_distributed_optimizer: bool = field(\\n+ default=None,\\n+ metadata={\"help\": \"enable distributed optimizer\"},\\n+ )\\n+ pipeline_model_parallel_split_rank: int = field(\\n+ default=None, metadata={\"help\": \"Rank where encoder and decoder should be split.\"}\\n+ )\\n+ num_layers_per_virtual_pipeline_stage: int = field(\\n+ default=None, metadata={\"help\": \"Number of layers per virtual pipeline stage.\"}\\n+ )\\n+ is_train_batch_min: str = field(\\n+ default=True,\\n+ metadata={\"help\": \"If both train & eval dataloaders are specified, this will decide the micro_batch_size\"},\\n+ )\\n+\\n+ def __post_init__(self):\\n+ prefix = \"MEGATRON_\"\\n+ if self.tp_degree is None:\\n+ self.tp_degree = int(os.environ.get(prefix + \"TP_DEGREE\", 1))\\n+ if self.pp_degree is None:\\n+ self.pp_degree = int(os.environ.get(prefix + \"PP_DEGREE\", 1))\\n+ if self.num_micro_batches is None:\\n+ self.num_micro_batches = int(os.environ.get(prefix + \"NUM_MICRO_BATCHES\", 1))\\n+ if self.gradient_clipping is None:\\n+ self.gradient_clipping = float(os.environ.get(prefix + \"GRADIENT_CLIPPING\", 1.0))\\n+ if self.recompute_activation is None:\\n+ self.recompute_activation = strtobool(os.environ.get(prefix + \"RECOMPUTE_ACTIVATION\", \"False\")) == 1\\n+ if self.use_distributed_optimizer is None:\\n+ self.use_distributed_optimizer = (\\n+ strtobool(os.environ.get(prefix + \"USE_DISTRIBUTED_OPTIMIZER\", \"False\")) == 1\\n+ )\\n+ if self.sequence_parallelism is None:\\n+ self.sequence_parallelism = strtobool(os.environ.get(prefix + \"SEQUENCE_PARALLELISM\", \"False\")) == 1\\n+\\n+ if self.pp_degree > 1:\\n+ self.DDP_impl = \"local\"\\n+ else:\\n+ self.DDP_impl = \"torch\"\\n+\\n+ self.megtron_lm_default_args = {',\n", - " 'from_author': 
False},\n", - " {'body': 'Done.',\n", - " 'diff_hunk': '@@ -643,3 +645,142 @@ def load_optimizer(self, accelerator, optimizer, model, input_dir, optimizer_ind\\n # called from all ranks, though only rank0 has a valid param for full_osd\\n sharded_osd = FSDP.scatter_full_optim_state_dict(full_osd, model)\\n optimizer.load_state_dict(sharded_osd)\\n+\\n+\\n+@dataclass\\n+class MegatronLMPlugin:\\n+ \"\"\"\\n+ Plugin for Megatron-LM to enable tensor, pipeline, sequence and data parallelism. Also to enable selective\\n+ activation recomputation and optimized fused kernels.\\n+ \"\"\"\\n+\\n+ tp_degree: int = field(default=None, metadata={\"help\": \"tensor parallelism degree.\"})\\n+ pp_degree: int = field(default=None, metadata={\"help\": \"pipeline parallelism degree.\"})\\n+ num_micro_batches: int = field(default=None, metadata={\"help\": \"number of micro-batches.\"})\\n+ gradient_clipping: float = field(\\n+ default=None, metadata={\"help\": \"gradient clipping value based on global L2 Norm (0 to disable)\"}\\n+ )\\n+ sequence_parallelism: bool = field(\\n+ default=None,\\n+ metadata={\"help\": \"enable sequence parallelism\"},\\n+ )\\n+ recompute_activation: bool = field(\\n+ default=None,\\n+ metadata={\"help\": \"enable selective activation recomputation\"},\\n+ )\\n+ use_distributed_optimizer: bool = field(\\n+ default=None,\\n+ metadata={\"help\": \"enable distributed optimizer\"},\\n+ )\\n+ pipeline_model_parallel_split_rank: int = field(\\n+ default=None, metadata={\"help\": \"Rank where encoder and decoder should be split.\"}\\n+ )\\n+ num_layers_per_virtual_pipeline_stage: int = field(\\n+ default=None, metadata={\"help\": \"Number of layers per virtual pipeline stage.\"}\\n+ )\\n+ is_train_batch_min: str = field(\\n+ default=True,\\n+ metadata={\"help\": \"If both train & eval dataloaders are specified, this will decide the micro_batch_size\"},\\n+ )\\n+\\n+ def __post_init__(self):\\n+ prefix = \"MEGATRON_\"\\n+ if self.tp_degree is None:\\n+ self.tp_degree = int(os.environ.get(prefix + \"TP_DEGREE\", 1))\\n+ if self.pp_degree is None:\\n+ self.pp_degree = int(os.environ.get(prefix + \"PP_DEGREE\", 1))\\n+ if self.num_micro_batches is None:\\n+ self.num_micro_batches = int(os.environ.get(prefix + \"NUM_MICRO_BATCHES\", 1))\\n+ if self.gradient_clipping is None:\\n+ self.gradient_clipping = float(os.environ.get(prefix + \"GRADIENT_CLIPPING\", 1.0))\\n+ if self.recompute_activation is None:\\n+ self.recompute_activation = strtobool(os.environ.get(prefix + \"RECOMPUTE_ACTIVATION\", \"False\")) == 1\\n+ if self.use_distributed_optimizer is None:\\n+ self.use_distributed_optimizer = (\\n+ strtobool(os.environ.get(prefix + \"USE_DISTRIBUTED_OPTIMIZER\", \"False\")) == 1\\n+ )\\n+ if self.sequence_parallelism is None:\\n+ self.sequence_parallelism = strtobool(os.environ.get(prefix + \"SEQUENCE_PARALLELISM\", \"False\")) == 1\\n+\\n+ if self.pp_degree > 1:\\n+ self.DDP_impl = \"local\"\\n+ else:\\n+ self.DDP_impl = \"torch\"\\n+\\n+ self.megtron_lm_default_args = {',\n", - " 'from_author': True},\n", - " {'body': \"I don't believe all of those should be changed just for MegatronLM.\",\n", - " 'diff_hunk': '@@ -1032,7 +1174,7 @@ def wait_for_everyone(self):\\n \"\"\"\\n wait_for_everyone()\\n \\n- @on_main_process\\n+ @on_last_process',\n", - " 'from_author': False},\n", - " {'body': 'Here the message should be more in the lines of \"The Megatron-LM only supports those\"',\n", - " 'diff_hunk': '@@ -643,3 +645,291 @@ def load_optimizer(self, accelerator, optimizer, model, input_dir, 
optimizer_ind\\n # called from all ranks, though only rank0 has a valid param for full_osd\\n sharded_osd = FSDP.scatter_full_optim_state_dict(full_osd, model)\\n optimizer.load_state_dict(sharded_osd)\\n+\\n+\\n+@dataclass\\n+class MegatronLMPlugin:\\n+ \"\"\"\\n+ Plugin for Megatron-LM to enable tensor, pipeline, sequence and data parallelism. Also to enable selective\\n+ activation recomputation and optimized fused kernels.\\n+ \"\"\"\\n+\\n+ tp_degree: int = field(default=None, metadata={\"help\": \"tensor parallelism degree.\"})\\n+ pp_degree: int = field(default=None, metadata={\"help\": \"pipeline parallelism degree.\"})\\n+ num_micro_batches: int = field(default=None, metadata={\"help\": \"number of micro-batches.\"})\\n+ gradient_clipping: float = field(\\n+ default=None, metadata={\"help\": \"gradient clipping value based on global L2 Norm (0 to disable)\"}\\n+ )\\n+ sequence_parallelism: bool = field(\\n+ default=None,\\n+ metadata={\"help\": \"enable sequence parallelism\"},\\n+ )\\n+ recompute_activation: bool = field(\\n+ default=None,\\n+ metadata={\"help\": \"enable selective activation recomputation\"},\\n+ )\\n+ use_distributed_optimizer: bool = field(\\n+ default=None,\\n+ metadata={\"help\": \"enable distributed optimizer\"},\\n+ )\\n+ pipeline_model_parallel_split_rank: int = field(\\n+ default=None, metadata={\"help\": \"Rank where encoder and decoder should be split.\"}\\n+ )\\n+ num_layers_per_virtual_pipeline_stage: int = field(\\n+ default=None, metadata={\"help\": \"Number of layers per virtual pipeline stage.\"}\\n+ )\\n+ is_train_batch_min: str = field(\\n+ default=True,\\n+ metadata={\"help\": \"If both train & eval dataloaders are specified, this will decide the micro_batch_size\"},\\n+ )\\n+ train_iters: int = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"Total number of iterations to train over all training runs. \"\\n+ \"Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`\"\\n+ },\\n+ )\\n+ train_samples: int = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"Total number of samples to train over all training runs. \"\\n+ \"Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`\"\\n+ },\\n+ )\\n+ weight_decay_incr_style: str = field(\\n+ default=\"constant\",\\n+ metadata={\\n+ \"help\": \\'Weight decay increment function. choices=[\"constant\", \"linear\", \"cosine\"]. \\'\\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\\n+ },\\n+ )\\n+ start_weight_decay: float = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"Initial weight decay coefficient for L2 regularization. \"\\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\\n+ },\\n+ )\\n+ end_weight_decay: float = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"End of run weight decay coefficient for L2 regularization. \"\\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\\n+ },\\n+ )\\n+ lr_decay_style: str = field(\\n+ default=\"linear\",\\n+ metadata={\\n+ \"help\": \"Learning rate decay function. choices=[\\'constant\\', \\'linear\\', \\'cosine\\']. \"\\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\\n+ },\\n+ )\\n+ lr_decay_iters: int = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"Number of iterations for learning rate decay. If None defaults to `train_iters`. 
\"\\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\\n+ },\\n+ )\\n+ lr_decay_samples: int = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"Number of samples for learning rate decay. If None defaults to `train_samples`. \"\\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\\n+ },\\n+ )\\n+ lr_warmup_iters: int = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"number of iterations to linearly warmup learning rate over. \"\\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\\n+ },\\n+ )\\n+ lr_warmup_samples: int = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"number of samples to linearly warmup learning rate over. \"\\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\\n+ },\\n+ )\\n+ lr_warmup_fraction: float = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"fraction of lr-warmup-(iters/samples) to linearly warmup learning rate over. \"\\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\\n+ },\\n+ )\\n+ min_lr: float = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"Minumum value for learning rate. The scheduler clip values below this threshold. \"\\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\\n+ },\\n+ )\\n+ consumed_samples: List[int] = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"Number of samples consumed in the same order as the dataloaders to `accelerator.prepare` call.\"\\n+ },\\n+ )\\n+ no_wd_decay_cond: Optional[Callable] = field(default=None, metadata={\"help\": \"Condition to disable weight decay.\"})\\n+ scale_lr_cond: Optional[Callable] = field(default=None, metadata={\"help\": \"Condition to scale learning rate.\"})\\n+ lr_mult: float = field(default=1.0, metadata={\"help\": \"Learning rate multiplier.\"})\\n+ megatron_dataset_flag: bool = field(\\n+ default=False,\\n+ metadata={\"help\": \"Whether the format of dataset follows Megatron-LM Indexed/Cached/MemoryMapped format.\"},\\n+ )\\n+\\n+ def __post_init__(self):\\n+ prefix = \"MEGATRON_\"\\n+ if self.tp_degree is None:\\n+ self.tp_degree = int(os.environ.get(prefix + \"TP_DEGREE\", 1))\\n+ if self.pp_degree is None:\\n+ self.pp_degree = int(os.environ.get(prefix + \"PP_DEGREE\", 1))\\n+ if self.num_micro_batches is None:\\n+ self.num_micro_batches = int(os.environ.get(prefix + \"NUM_MICRO_BATCHES\", 1))\\n+ if self.gradient_clipping is None:\\n+ self.gradient_clipping = float(os.environ.get(prefix + \"GRADIENT_CLIPPING\", 1.0))\\n+ if self.recompute_activation is None:\\n+ self.recompute_activation = strtobool(os.environ.get(prefix + \"RECOMPUTE_ACTIVATION\", \"False\")) == 1\\n+ if self.use_distributed_optimizer is None:\\n+ self.use_distributed_optimizer = (\\n+ strtobool(os.environ.get(prefix + \"USE_DISTRIBUTED_OPTIMIZER\", \"False\")) == 1\\n+ )\\n+ if self.sequence_parallelism is None:\\n+ self.sequence_parallelism = strtobool(os.environ.get(prefix + \"SEQUENCE_PARALLELISM\", \"False\")) == 1\\n+\\n+ if self.pp_degree > 1:\\n+ self.DDP_impl = \"local\"\\n+ else:\\n+ self.DDP_impl = \"torch\"\\n+\\n+ if self.consumed_samples is not None:\\n+ if len(self.consumed_samples) == 1:\\n+ self.consumed_samples.extend([0, 0])\\n+ elif len(self.consumed_samples) == 2:\\n+ self.consumed_samples.append(0)\\n+\\n+ self.megatron_lm_default_args = {\\n+ \"tensor_model_parallel_size\": self.tp_degree,\\n+ \"pipeline_model_parallel_size\": self.pp_degree,\\n+ \"pipeline_model_parallel_split_rank\": self.pipeline_model_parallel_split_rank,\\n+ \"num_layers_per_virtual_pipeline_stage\": 
self.num_layers_per_virtual_pipeline_stage,\\n+ \"DDP_impl\": self.DDP_impl,\\n+ \"use_distributed_optimizer\": self.use_distributed_optimizer,\\n+ \"recompute_activations\": self.recompute_activation,\\n+ \"sequence_parallel\": self.sequence_parallelism,\\n+ \"clip_grad\": self.gradient_clipping,\\n+ \"num_micro_batches\": self.num_micro_batches,\\n+ \"consumed_samples\": self.consumed_samples,\\n+ \"no_wd_decay_cond\": self.no_wd_decay_cond,\\n+ \"scale_lr_cond\": self.scale_lr_cond,\\n+ \"lr_mult\": self.lr_mult,\\n+ \"megatron_dataset_flag\": self.megatron_dataset_flag,\\n+ }\\n+\\n+ def set_network_size_args(self, model):\\n+ # Check if the model is either BERT, GPT or T5 else raise error\\n+ # set \\'num_layers\\', \\'hidden_size\\', \\'num_attention_heads\\', \\'max_position_embeddings\\'\\n+ if \"bert\" in model.__class__.__name__.lower():\\n+ model_type_name = \"bert\"\\n+ num_layers = model.config.num_hidden_layers\\n+ hidden_size = model.config.hidden_size\\n+ num_attention_heads = model.config.num_attention_heads\\n+ max_position_embeddings = model.config.max_position_embeddings\\n+ num_labels = model.config.num_labels\\n+ orig_vocab_size = model.config.vocab_size\\n+ if \"maskedlm\" in model.__class__.__name__.lower():\\n+ pretraining_flag = True\\n+\\n+ elif \"gpt\" in model.__class__.__name__.lower():\\n+ model_type_name = \"gpt\"\\n+ num_layers = model.config.n_layer\\n+ hidden_size = model.config.n_embd\\n+ num_attention_heads = model.config.n_head\\n+ max_position_embeddings = model.config.n_positions\\n+ orig_vocab_size = model.config.vocab_size\\n+ pretraining_flag = True\\n+ elif \"t5\" in model.__class__.__name__.lower():\\n+ model_type_name = \"t5\"\\n+ num_layers = model.config.num_layers\\n+ hidden_size = model.config.d_model\\n+ num_attention_heads = model.config.num_heads\\n+ max_position_embeddings = model.config.n_positions\\n+ orig_vocab_size = model.config.vocab_size\\n+ pretraining_flag = True\\n+ else:\\n+ raise ValueError(\"Model is not BERT, GPT or T5. Please check the model you are using.\")',\n", - " 'from_author': False},\n", - " {'body': 'You should check the `model_type` attribute instead of the class name IMO.',\n", - " 'diff_hunk': '@@ -643,3 +645,291 @@ def load_optimizer(self, accelerator, optimizer, model, input_dir, optimizer_ind\\n # called from all ranks, though only rank0 has a valid param for full_osd\\n sharded_osd = FSDP.scatter_full_optim_state_dict(full_osd, model)\\n optimizer.load_state_dict(sharded_osd)\\n+\\n+\\n+@dataclass\\n+class MegatronLMPlugin:\\n+ \"\"\"\\n+ Plugin for Megatron-LM to enable tensor, pipeline, sequence and data parallelism. 
Also to enable selective\\n+ activation recomputation and optimized fused kernels.\\n+ \"\"\"\\n+\\n+ tp_degree: int = field(default=None, metadata={\"help\": \"tensor parallelism degree.\"})\\n+ pp_degree: int = field(default=None, metadata={\"help\": \"pipeline parallelism degree.\"})\\n+ num_micro_batches: int = field(default=None, metadata={\"help\": \"number of micro-batches.\"})\\n+ gradient_clipping: float = field(\\n+ default=None, metadata={\"help\": \"gradient clipping value based on global L2 Norm (0 to disable)\"}\\n+ )\\n+ sequence_parallelism: bool = field(\\n+ default=None,\\n+ metadata={\"help\": \"enable sequence parallelism\"},\\n+ )\\n+ recompute_activation: bool = field(\\n+ default=None,\\n+ metadata={\"help\": \"enable selective activation recomputation\"},\\n+ )\\n+ use_distributed_optimizer: bool = field(\\n+ default=None,\\n+ metadata={\"help\": \"enable distributed optimizer\"},\\n+ )\\n+ pipeline_model_parallel_split_rank: int = field(\\n+ default=None, metadata={\"help\": \"Rank where encoder and decoder should be split.\"}\\n+ )\\n+ num_layers_per_virtual_pipeline_stage: int = field(\\n+ default=None, metadata={\"help\": \"Number of layers per virtual pipeline stage.\"}\\n+ )\\n+ is_train_batch_min: str = field(\\n+ default=True,\\n+ metadata={\"help\": \"If both train & eval dataloaders are specified, this will decide the micro_batch_size\"},\\n+ )\\n+ train_iters: int = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"Total number of iterations to train over all training runs. \"\\n+ \"Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`\"\\n+ },\\n+ )\\n+ train_samples: int = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"Total number of samples to train over all training runs. \"\\n+ \"Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`\"\\n+ },\\n+ )\\n+ weight_decay_incr_style: str = field(\\n+ default=\"constant\",\\n+ metadata={\\n+ \"help\": \\'Weight decay increment function. choices=[\"constant\", \"linear\", \"cosine\"]. \\'\\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\\n+ },\\n+ )\\n+ start_weight_decay: float = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"Initial weight decay coefficient for L2 regularization. \"\\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\\n+ },\\n+ )\\n+ end_weight_decay: float = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"End of run weight decay coefficient for L2 regularization. \"\\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\\n+ },\\n+ )\\n+ lr_decay_style: str = field(\\n+ default=\"linear\",\\n+ metadata={\\n+ \"help\": \"Learning rate decay function. choices=[\\'constant\\', \\'linear\\', \\'cosine\\']. \"\\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\\n+ },\\n+ )\\n+ lr_decay_iters: int = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"Number of iterations for learning rate decay. If None defaults to `train_iters`. \"\\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\\n+ },\\n+ )\\n+ lr_decay_samples: int = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"Number of samples for learning rate decay. If None defaults to `train_samples`. \"\\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\\n+ },\\n+ )\\n+ lr_warmup_iters: int = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"number of iterations to linearly warmup learning rate over. 
\"\\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\\n+ },\\n+ )\\n+ lr_warmup_samples: int = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"number of samples to linearly warmup learning rate over. \"\\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\\n+ },\\n+ )\\n+ lr_warmup_fraction: float = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"fraction of lr-warmup-(iters/samples) to linearly warmup learning rate over. \"\\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\\n+ },\\n+ )\\n+ min_lr: float = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"Minumum value for learning rate. The scheduler clip values below this threshold. \"\\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\\n+ },\\n+ )\\n+ consumed_samples: List[int] = field(\\n+ default=None,\\n+ metadata={\\n+ \"help\": \"Number of samples consumed in the same order as the dataloaders to `accelerator.prepare` call.\"\\n+ },\\n+ )\\n+ no_wd_decay_cond: Optional[Callable] = field(default=None, metadata={\"help\": \"Condition to disable weight decay.\"})\\n+ scale_lr_cond: Optional[Callable] = field(default=None, metadata={\"help\": \"Condition to scale learning rate.\"})\\n+ lr_mult: float = field(default=1.0, metadata={\"help\": \"Learning rate multiplier.\"})\\n+ megatron_dataset_flag: bool = field(\\n+ default=False,\\n+ metadata={\"help\": \"Whether the format of dataset follows Megatron-LM Indexed/Cached/MemoryMapped format.\"},\\n+ )\\n+\\n+ def __post_init__(self):\\n+ prefix = \"MEGATRON_\"\\n+ if self.tp_degree is None:\\n+ self.tp_degree = int(os.environ.get(prefix + \"TP_DEGREE\", 1))\\n+ if self.pp_degree is None:\\n+ self.pp_degree = int(os.environ.get(prefix + \"PP_DEGREE\", 1))\\n+ if self.num_micro_batches is None:\\n+ self.num_micro_batches = int(os.environ.get(prefix + \"NUM_MICRO_BATCHES\", 1))\\n+ if self.gradient_clipping is None:\\n+ self.gradient_clipping = float(os.environ.get(prefix + \"GRADIENT_CLIPPING\", 1.0))\\n+ if self.recompute_activation is None:\\n+ self.recompute_activation = strtobool(os.environ.get(prefix + \"RECOMPUTE_ACTIVATION\", \"False\")) == 1\\n+ if self.use_distributed_optimizer is None:\\n+ self.use_distributed_optimizer = (\\n+ strtobool(os.environ.get(prefix + \"USE_DISTRIBUTED_OPTIMIZER\", \"False\")) == 1\\n+ )\\n+ if self.sequence_parallelism is None:\\n+ self.sequence_parallelism = strtobool(os.environ.get(prefix + \"SEQUENCE_PARALLELISM\", \"False\")) == 1\\n+\\n+ if self.pp_degree > 1:\\n+ self.DDP_impl = \"local\"\\n+ else:\\n+ self.DDP_impl = \"torch\"\\n+\\n+ if self.consumed_samples is not None:\\n+ if len(self.consumed_samples) == 1:\\n+ self.consumed_samples.extend([0, 0])\\n+ elif len(self.consumed_samples) == 2:\\n+ self.consumed_samples.append(0)\\n+\\n+ self.megatron_lm_default_args = {\\n+ \"tensor_model_parallel_size\": self.tp_degree,\\n+ \"pipeline_model_parallel_size\": self.pp_degree,\\n+ \"pipeline_model_parallel_split_rank\": self.pipeline_model_parallel_split_rank,\\n+ \"num_layers_per_virtual_pipeline_stage\": self.num_layers_per_virtual_pipeline_stage,\\n+ \"DDP_impl\": self.DDP_impl,\\n+ \"use_distributed_optimizer\": self.use_distributed_optimizer,\\n+ \"recompute_activations\": self.recompute_activation,\\n+ \"sequence_parallel\": self.sequence_parallelism,\\n+ \"clip_grad\": self.gradient_clipping,\\n+ \"num_micro_batches\": self.num_micro_batches,\\n+ \"consumed_samples\": self.consumed_samples,\\n+ \"no_wd_decay_cond\": self.no_wd_decay_cond,\\n+ \"scale_lr_cond\": 
self.scale_lr_cond,\\n+ \"lr_mult\": self.lr_mult,\\n+ \"megatron_dataset_flag\": self.megatron_dataset_flag,\\n+ }\\n+\\n+ def set_network_size_args(self, model):\\n+ # Check if the model is either BERT, GPT or T5 else raise error\\n+ # set \\'num_layers\\', \\'hidden_size\\', \\'num_attention_heads\\', \\'max_position_embeddings\\'\\n+ if \"bert\" in model.__class__.__name__.lower():',\n", - " 'from_author': False},\n", - " {'body': 'To be cleaned up?',\n", - " 'diff_hunk': '@@ -0,0 +1,821 @@\\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+import argparse\\n+from abc import ABC\\n+from functools import partial\\n+\\n+import torch\\n+import torch.nn.functional as F\\n+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\\n+\\n+from ..optimizer import AcceleratedOptimizer\\n+from ..scheduler import AcceleratedScheduler\\n+from .imports import is_megatron_lm_available, is_transformers_available\\n+from .operations import recursively_apply\\n+\\n+\\n+if is_transformers_available():\\n+ from transformers.modeling_outputs import (\\n+ CausalLMOutputWithCrossAttentions,\\n+ Seq2SeqLMOutput,\\n+ SequenceClassifierOutput,\\n+ )\\n+ from transformers.models.bert.modeling_bert import BertForPreTrainingOutput\\n+\\n+\\n+if is_megatron_lm_available():\\n+ from megatron import get_args, get_num_microbatches, get_timers, mpu\\n+ from megatron.arguments import _add_data_args\\n+ from megatron.data.data_samplers import build_pretraining_data_loader\\n+ from megatron.data.dataset_utils import build_train_valid_test_datasets\\n+ from megatron.initialize import (\\n+ get_tensor_model_parallel_group,\\n+ get_tensor_model_parallel_src_rank,\\n+ initialize_megatron,\\n+ set_jit_fusion_options,\\n+ )\\n+ from megatron.model import BertModel, GPTModel, ModelType, T5Model\\n+ from megatron.model.classification import Classification\\n+ from megatron.model.module import MegatronModule\\n+ from megatron.optimizer import get_megatron_optimizer\\n+ from megatron.schedules import get_forward_backward_func\\n+ from megatron.tokenizer.tokenizer import _vocab_size_with_padding\\n+ from megatron.training import build_train_valid_test_data_iterators, get_model, get_optimizer_param_scheduler\\n+ from megatron.utils import average_losses_across_data_parallel_group, get_ltor_masks_and_position_ids\\n+\\n+\\n+def model_provider_func(accelerator, pre_process=True, post_process=True, add_encoder=True, add_decoder=True):\\n+ \"\"\"Build the model.\"\"\"\\n+ args = get_args()\\n+ mode = \"pre-training\" if args.pretraining_flag else \"fine-tuning\"\\n+ accelerator.print(f\"Building {args.model_type_name} model in the {mode} mode.\")\\n+ if args.model_type_name == \"bert\":\\n+ if args.pretraining_flag:\\n+ num_tokentypes = 2 if args.bert_binary_head else 0\\n+ model = BertModel(\\n+ num_tokentypes=num_tokentypes,\\n+ add_binary_head=args.bert_binary_head,\\n+ parallel_output=True,\\n+ pre_process=pre_process,\\n+ 
post_process=post_process,\\n+ )\\n+ else:\\n+\\n+ model = Classification(\\n+ num_classes=args.num_labels, num_tokentypes=2, pre_process=pre_process, post_process=post_process\\n+ )\\n+ elif args.model_type_name == \"gpt2\":\\n+ model = GPTModel(num_tokentypes=0, parallel_output=True, pre_process=pre_process, post_process=post_process)\\n+ elif args.model_type_name == \"t5\":\\n+ model = T5Model(\\n+ num_tokentypes=0,\\n+ parallel_output=True,\\n+ pre_process=pre_process,\\n+ post_process=post_process,\\n+ add_encoder=add_encoder,\\n+ add_decoder=add_decoder,\\n+ )\\n+ else:\\n+ raise ValueError(f\"Unknown model type: {args.model_type_name}\")\\n+ return model\\n+\\n+\\n+def prepare_data_loader(accelerator, dataloader, consumed_samples_index=-1, consumed_samples=0):\\n+ accelerator.print(\"Preparing dataloader\")\\n+ args = get_args()\\n+ if not args.megatron_dataset_flag:\\n+ if args.consumed_samples is not None:\\n+ consumed_samples = args.consumed_samples[consumed_samples_index]\\n+ return build_pretraining_data_loader(dataloader.dataset, consumed_samples)\\n+ else:\\n+ if args.consumed_samples is not None:\\n+ (\\n+ args.consumed_train_samples,\\n+ args.consumed_valid_samples,\\n+ args.consumed_test_samples,\\n+ ) = args.consumed_samples\\n+ else:\\n+ args.consumed_train_samples, args.consumed_valid_samples, args.consumed_test_samples = 0, 0, 0\\n+ train_data_iterator, valid_data_iterator, test_data_iterator = build_train_valid_test_data_iterators(\\n+ MegatronLMDummyDataLoader.get_train_valid_test_datasets_provider\\n+ )\\n+ return train_data_iterator, valid_data_iterator, test_data_iterator\\n+\\n+\\n+def prepare_model(accelerator):\\n+ accelerator.print(\"Preparing model\")\\n+ args = get_args()\\n+ if args.model_type_name == \"bert\" or args.model_type_name == \"gpt\":\\n+ model_type = ModelType.encoder_or_decoder\\n+ elif args.model_type_name == \"t5\":\\n+ model_type = ModelType.encoder_and_decoder\\n+ model = get_model(model_provider_func, model_type)\\n+ return model\\n+\\n+\\n+def prepare_optimizer(accelerator, model):\\n+ accelerator.print(\"Preparing optimizer\")\\n+ args = get_args()\\n+ optimizer = get_megatron_optimizer(model, args.no_wd_decay_cond, args.scale_lr_cond, args.lr_mult)\\n+ return optimizer\\n+\\n+\\n+def prepare_scheduler(accelerator, optimizer, scheduler, is_dummy_scheduler):\\n+ accelerator.print(\"Preparing scheduler\")\\n+ if is_dummy_scheduler:\\n+ scheduler = get_optimizer_param_scheduler(optimizer)\\n+ else:\\n+ scheduler.optimizer = optimizer\\n+ if isinstance(scheduler, torch.optim.lr_scheduler.LambdaLR):\\n+ scheduler = scheduler.__class__(optimizer, scheduler.lr_lambdas[0])\\n+ return scheduler\\n+\\n+\\n+def initialize(accelerator, extra_args_provider=None, args_defaults={}):\\n+ accelerator.print(\"Initializing Megatron-LM\")\\n+ # Initalize and get arguments\\n+ initialize_megatron(extra_args_provider=extra_args_provider, args_defaults=args_defaults, ignore_unknown_args=True)\\n+ # Set pytorch JIT layer fusion options and warmup JIT functions.\\n+ set_jit_fusion_options()\\n+ args = get_args()\\n+ args.padded_vocab_size = _vocab_size_with_padding(args.orig_vocab_size, args)\\n+ if args.model_type_name == \"bert\" and args.pretraining_flag and args.num_labels == 2:\\n+ args.bert_binary_head = True\\n+ # if args.virtual_pipeline_model_parallel_size is not None:\\n+ # raise Exception(\"Virtual pipeline model parallelism is not supported in Accelerate\")',\n", - " 'from_author': False},\n", - " {'body': \"The aliases should be done in the `.utils` 
module: if we have another integration like this, we won't be able to have both names.\",\n", - " 'diff_hunk': '@@ -71,6 +73,21 @@\\n DummyScheduler,\\n )\\n \\n+if is_megatron_lm_available():\\n+ from .utils import (\\n+ MegatronEngine,\\n+ MegatronLMDummyDataLoader,\\n+ MegatronLMDummyScheduler,\\n+ MegatronLMOptimizerWrapper,\\n+ MegatronLMSchedulerWrapper,\\n+ )\\n+ from .utils import initialize as megatron_lm_initialize\\n+ from .utils import prepare_data_loader as megatron_lm_prepare_data_loader\\n+ from .utils import prepare_model as megatron_lm_prepare_model\\n+ from .utils import prepare_optimizer as megatron_lm_prepare_optimizer\\n+ from .utils import prepare_scheduler as megatron_lm_prepare_scheduler',\n", - " 'from_author': False},\n", - " {'body': 'AS said above, those should probably have the `megatron_lm` prefix here.',\n", - " 'diff_hunk': '@@ -94,6 +96,23 @@\\n )\\n \\n from .launch import PrepareForLaunch, _filter_args, get_launch_prefix\\n+from .megatron_lm import (\\n+ AbstractTrainStep,\\n+ BertTrainStep,\\n+ GPTTrainStep,\\n+ MegatronEngine,\\n+ MegatronLMDummyDataLoader,\\n+ MegatronLMDummyScheduler,\\n+ MegatronLMOptimizerWrapper,\\n+ MegatronLMSchedulerWrapper,\\n+ T5TrainStep,\\n+ avg_losses_across_data_parallel_group,\\n+ initialize,\\n+ prepare_data_loader,\\n+ prepare_model,\\n+ prepare_optimizer,\\n+ prepare_scheduler,',\n", - " 'from_author': False},\n", - " {'body': 'Nit: in a followup PR it would be nice to have all of those in one constant.',\n", - " 'diff_hunk': '@@ -224,6 +224,7 @@ def gather(tensor):\\n DistributedType.DEEPSPEED,\\n DistributedType.MULTI_GPU,\\n DistributedType.FSDP,\\n+ DistributedType.MEGATRON_LM,',\n", - " 'from_author': False},\n", - " {'body': 'Done.',\n", - " 'diff_hunk': '@@ -71,6 +73,21 @@\\n DummyScheduler,\\n )\\n \\n+if is_megatron_lm_available():\\n+ from .utils import (\\n+ MegatronEngine,\\n+ MegatronLMDummyDataLoader,\\n+ MegatronLMDummyScheduler,\\n+ MegatronLMOptimizerWrapper,\\n+ MegatronLMSchedulerWrapper,\\n+ )\\n+ from .utils import initialize as megatron_lm_initialize\\n+ from .utils import prepare_data_loader as megatron_lm_prepare_data_loader\\n+ from .utils import prepare_model as megatron_lm_prepare_model\\n+ from .utils import prepare_optimizer as megatron_lm_prepare_optimizer\\n+ from .utils import prepare_scheduler as megatron_lm_prepare_scheduler',\n", - " 'from_author': True},\n", - " {'body': 'Done.',\n", - " 'diff_hunk': '@@ -94,6 +96,23 @@\\n )\\n \\n from .launch import PrepareForLaunch, _filter_args, get_launch_prefix\\n+from .megatron_lm import (\\n+ AbstractTrainStep,\\n+ BertTrainStep,\\n+ GPTTrainStep,\\n+ MegatronEngine,\\n+ MegatronLMDummyDataLoader,\\n+ MegatronLMDummyScheduler,\\n+ MegatronLMOptimizerWrapper,\\n+ MegatronLMSchedulerWrapper,\\n+ T5TrainStep,\\n+ avg_losses_across_data_parallel_group,\\n+ initialize,\\n+ prepare_data_loader,\\n+ prepare_model,\\n+ prepare_optimizer,\\n+ prepare_scheduler,',\n", - " 'from_author': True},\n", - " {'body': 'Can be for another PR, but would be nice to explain the difference between those kinds of parallelism and what is parallelized exactly.',\n", - " 'diff_hunk': '@@ -0,0 +1,507 @@\\n+\\n+\\n+\\n+# Megatron-LM\\n+\\n+[Megatron-LM](https://github.com/NVIDIA/Megatron-LM) enables training large transformer language models at scale.\\n+It provides efficient tensor, pipeline and sequence based model parallelism for pre-training transformer based\\n+Language Models such as [GPT](https://arxiv.org/abs/2005.14165) (Decoder Only), 
[BERT](https://arxiv.org/pdf/1810.04805.pdf) (Encoder Only) and [T5](https://arxiv.org/abs/1910.10683) (Encoder-Decoder).\\n+For detailed information and how things work behind the scene please refer the github [repo](https://github.com/NVIDIA/Megatron-LM).\\n+\\n+## What is integrated?\\n+\\n+Accelerate integrates following feature of Megatron-LM to enable large scale pre-training/finetuning\\n+of BERT (Encoder), GPT (Decoder) or T5 models (Encoder and Decoder):\\n+\\n+a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks\\n+\\n+b. **Pipeline Parallelism (PP)**: Reduces memory footprint and enables large scale training via inter-node parallelization. \\n+Reduces the bubble of naive PP via PipeDream-Flush schedule/1F1B schedule and Interleaved 1F1B schedule. \\n+Layers are distributed uniformly across PP stages.\\n+\\n+c. **Sequence Parallelism (SP)**: Reduces memory footprint without any additional communication. Only applicable when using TP.\\n+\\n+d. **Data Parallelism (DP)** via Distributed Optimizer: Reduces memory footprint by sharding optimizer states and gradients across DP ranks.',\n", - " 'from_author': False},\n", - " {'body': 'PR #758 to resolve this',\n", - " 'diff_hunk': '@@ -224,6 +224,7 @@ def gather(tensor):\\n DistributedType.DEEPSPEED,\\n DistributedType.MULTI_GPU,\\n DistributedType.FSDP,\\n+ DistributedType.MEGATRON_LM,',\n", - " 'from_author': True},\n", - " {'body': 'PR #759 to address this.',\n", - " 'diff_hunk': '@@ -0,0 +1,507 @@\\n+\\n+\\n+\\n+# Megatron-LM\\n+\\n+[Megatron-LM](https://github.com/NVIDIA/Megatron-LM) enables training large transformer language models at scale.\\n+It provides efficient tensor, pipeline and sequence based model parallelism for pre-training transformer based\\n+Language Models such as [GPT](https://arxiv.org/abs/2005.14165) (Decoder Only), [BERT](https://arxiv.org/pdf/1810.04805.pdf) (Encoder Only) and [T5](https://arxiv.org/abs/1910.10683) (Encoder-Decoder).\\n+For detailed information and how things work behind the scene please refer the github [repo](https://github.com/NVIDIA/Megatron-LM).\\n+\\n+## What is integrated?\\n+\\n+Accelerate integrates following feature of Megatron-LM to enable large scale pre-training/finetuning\\n+of BERT (Encoder), GPT (Decoder) or T5 models (Encoder and Decoder):\\n+\\n+a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks\\n+\\n+b. **Pipeline Parallelism (PP)**: Reduces memory footprint and enables large scale training via inter-node parallelization. \\n+Reduces the bubble of naive PP via PipeDream-Flush schedule/1F1B schedule and Interleaved 1F1B schedule. \\n+Layers are distributed uniformly across PP stages.\\n+\\n+c. **Sequence Parallelism (SP)**: Reduces memory footprint without any additional communication. Only applicable when using TP.\\n+\\n+d. 
**Data Parallelism (DP)** via Distributed Optimizer: Reduces memory footprint by sharding optimizer states and gradients across DP ranks.',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/667'},\n", - " 1040133897: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex 4991ddbe4..353ccf894 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -140,7 +140,7 @@ class Accelerator:\\n A list of `KwargHandler` to customize how the objects related to distributed training or mixed precision\\n are created. See [kwargs](kwargs) for more information.\\n \\n- **Attributes:**\\n+ **Available attributes:**\\n \\n - **device** (`torch.device`) -- The device to use.\\n - **distributed_type** ([`~utils.DistributedType`]) -- The distributed training configuration.\\n@@ -463,6 +463,28 @@ def no_sync(self, model):\\n Args:\\n model (`torch.nn.Module`):\\n PyTorch Module that was prepared with `Accelerator.prepare`\\n+\\n+ Example:\\n+\\n+ ```python\\n+ >>> from accelerate import Accelerator\\n+\\n+ >>> accelerator = Accelerator()\\n+ >>> dataloader, model, optimizer = accelerator.prepare(dataloader, model, optimizer)\\n+ >>> input_a = next(iter(dataloader))\\n+ >>> input_b = next(iter(dataloader))\\n+\\n+ >>> with accelerator.no_sync():\\n+ ... outputs = model(input_a)\\n+ ... loss = loss_func(outputs)\\n+ ... accelerator.backward(loss)\\n+ ... # No synchronization across processes, only accumulate gradients\\n+ >>> outputs = model(input_b)\\n+ >>> accelerator.backward(loss)\\n+ >>> # Synchronization across all processes\\n+ >>> optimizer.step()\\n+ >>> optimizer.zero_grad()\\n+ ```\\n \"\"\"\\n context = contextlib.nullcontext\\n if self.use_distributed:\\n@@ -492,6 +514,24 @@ def accumulate(self, model):\\n Args:\\n model (`torch.nn.Module`):\\n PyTorch Module that was prepared with `Accelerator.prepare`\\n+\\n+ Example:\\n+\\n+ ```python\\n+ >>> from accelerate import Accelerator\\n+\\n+ >>> accelerator = Accelerator(gradient_accumulation_steps=2)\\n+ >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)\\n+\\n+ >>> with accelerator.accumulate():\\n+ ... for input, output in dataloader:\\n+ ... outputs = model(input)\\n+ ... loss = loss_func(outputs)\\n+ ... loss.backward()\\n+ ... optimizer.step()\\n+ ... scheduler.step()\\n+ ... 
optimizer.zero_grad()\\n+ ```\\n \"\"\"\\n self._do_sync()\\n if self.sync_gradients:\\n@@ -873,7 +913,10 @@ def prepare_scheduler(self, scheduler):\\n \\n def backward(self, loss, **kwargs):\\n \"\"\"\\n- Use `accelerator.backward(loss)` in lieu of `loss.backward()`.\\n+ Scales the gradients in accordance to `Accelerator.gradient_accumulation_steps` and calls the correct\\n+ `backward()` based on the configuration.\\n+\\n+ Should be used in lieu of `loss.backward()`.\\n \"\"\"\\n loss /= self.gradient_accumulation_steps\\n if self.distributed_type == DistributedType.DEEPSPEED:\\n@@ -906,6 +949,24 @@ def unscale_gradients(self, optimizer=None):\\n def clip_grad_norm_(self, parameters, max_norm, norm_type=2):\\n \"\"\"\\n Should be used in place of `torch.nn.utils.clip_grad_norm_`.\\n+\\n+ Example:\\n+\\n+ ```python\\n+ >>> from accelerate import Accelerator\\n+\\n+ >>> accelerator = Accelerator(gradient_accumulation_steps=2)\\n+ >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)\\n+\\n+ >>> for (input, target) in dataloader:\\n+ ... optimizer.zero_grad()\\n+ ... output = model(input)\\n+ ... loss = loss_func(output, target)\\n+ ... accelerator.backward(loss)\\n+ ... if accelerator.sync_gradients:\\n+ ... accelerator.clip_grad_norm_(model.parameters(), max_grad_norm)\\n+ ... optimizer.step()\\n+ ```\\n \"\"\"\\n if self.distributed_type == DistributedType.FSDP:\\n self.unscale_gradients()\\n@@ -923,6 +984,24 @@ def clip_grad_norm_(self, parameters, max_norm, norm_type=2):\\n def clip_grad_value_(self, parameters, clip_value):\\n \"\"\"\\n Should be used in place of `torch.nn.utils.clip_grad_value_`.\\n+\\n+ Example:\\n+\\n+ ```python\\n+ >>> from accelerate import Accelerator\\n+\\n+ >>> accelerator = Accelerator(gradient_accumulation_steps=2)\\n+ >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)\\n+\\n+ >>> for (input, target) in dataloader:\\n+ ... optimizer.zero_grad()\\n+ ... output = model(input)\\n+ ... loss = loss_func(output, target)\\n+ ... accelerator.backward(loss)\\n+ ... if accelerator.sync_gradients:\\n+ ... accelerator.clip_grad_value_(model.parameters(), clip_value)\\n+ ... optimizer.step()\\n+ ```\\n \"\"\"\\n if self.distributed_type in [DistributedType.DEEPSPEED, DistributedType.FSDP]:\\n raise Exception(\"DeepSpeed and FSDP do not support `clip_grad_value_`. 
Use `clip_grad_norm_` instead.\")\\n@@ -1124,6 +1203,13 @@ def save_state(self, output_dir: str):\\n \"\"\"\\n Saves the current states of the model, optimizer, scaler, RNG generators, and registered objects.\\n \\n+ \\n+\\n+ Should only be used when wanting to save a checkpoint during training and restoring the state in the same\\n+ environment.\\n+\\n+ \\n+\\n Args:\\n output_dir (`str` or `os.PathLike`):\\n The name of the folder to save all relevant weights and states.\\n@@ -1179,6 +1265,12 @@ def load_state(self, input_dir: str):\\n \"\"\"\\n Loads the current states of the model, optimizer, scaler, RNG generators, and registered objects.\\n \\n+ \\n+\\n+ Should only be used in conjunction with [`Accelerator.save_state`].\\n+\\n+ \\n+\\n Args:\\n input_dir (`str` or `os.PathLike`):\\n The name of the folder all relevant weights and states were saved in.\\ndiff --git a/src/accelerate/logging.py b/src/accelerate/logging.py\\nindex 2128cdca7..2c67e24eb 100644\\n--- a/src/accelerate/logging.py\\n+++ b/src/accelerate/logging.py\\n@@ -49,15 +49,20 @@ def get_logger(name: str):\\n \\n If a log should be called on all processes, pass `main_process_only=False`\\n \\n- E.g.\\n- ```python\\n- logger.info(\"My log\", main_process_only=False)\\n- logger.debug(\"My log\", main_process_only=False)\\n- ```\\n-\\n Args:\\n name (`str`):\\n The name for the logger, such as `__file__`\\n+\\n+ Example:\\n+\\n+ ```python\\n+ >>> from accelerate.logging import get_logger\\n+\\n+ >>> logger = get_logger(__name__)\\n+\\n+ >>> logger.info(\"My log\", main_process_only=False)\\n+ >>> logger.debug(\"My log\", main_process_only=True)\\n+ ```\\n \"\"\"\\n logger = logging.getLogger(name)\\n return MultiProcessAdapter(logger, {})\\ndiff --git a/src/accelerate/scheduler.py b/src/accelerate/scheduler.py\\nindex 75695091e..4ae91a657 100644\\n--- a/src/accelerate/scheduler.py\\n+++ b/src/accelerate/scheduler.py\\n@@ -28,7 +28,7 @@ class AcceleratedScheduler:\\n to avoid making a scheduler step too fast when gradients went overflow and there was no training step (in mixed\\n precision training)\\n \\n- When performing gradient accumulation scheduler lengths should not be changed accordingly, accelerate will always\\n+ When performing gradient accumulation scheduler lengths should not be changed accordingly, Accelerate will always\\n step the scheduler to account for it.\\n \\n Args:\\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\\nindex 051cf8bc6..a700cfad6 100644\\n--- a/src/accelerate/state.py\\n+++ b/src/accelerate/state.py\\n@@ -50,14 +50,14 @@ class AcceleratorState:\\n \"\"\"\\n Singleton class that has information about the current training environment.\\n \\n- **Attributes:**\\n+ **Available attributes:**\\n \\n - **device** (`torch.device`) -- The device to use.\\n - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently\\n in use.\\n - **local_process_index** (`int`) -- The index of the current process on the current server.\\n- - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision. 
If you are using\\n- mixed precision, define if you want to use FP16 or BF16 (bfloat16) as the floating point.\\n+ - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type\\n+ of mixed precision being performed.\\n - **num_processes** (`int`) -- The number of processes currently launched in parallel.\\n - **process_index** (`int`) -- The index of the current process.\\n \"\"\"\\n@@ -278,10 +278,11 @@ class GradientState:\\n \"\"\"\\n Singleton class that has information related to gradient synchronization for gradient accumulation\\n \\n- **Attributes:**\\n+ **Available attributes:**\\n \\n - **end_of_dataloader** (`bool`) -- Whether we have reached the end the current dataloader\\n - **remainder** (`int`) -- The number of extra samples that were added from padding the dataloader\\n+ - **sync_gradients** (`bool`) -- Whether the gradients should be synced across all devices\\n \"\"\"\\n \\n _shared_state = {}\\n@@ -310,5 +311,5 @@ def _set_end_of_dataloader(self, end_of_dataloader):\\n self.end_of_dataloader = end_of_dataloader\\n \\n def _set_remainder(self, remainder):\\n- \"Private function that sets the number of remaining samples at the end of the dataloader\"\\n+ \"Private function that sets the number of remaining samples at the end of the dataloader. Users should not have to call this.\"\\n self.remainder = remainder\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/666'},\n", - " 1037561043: {'diff': 'diff --git a/README.md b/README.md\\nindex f15f442a7..deddf2986 100644\\n--- a/README.md\\n+++ b/README.md\\n@@ -243,3 +243,16 @@ pip install accelerate\\n - FP16 with native AMP (apex on the roadmap)\\n - DeepSpeed support (Experimental)\\n - PyTorch Fully Sharded Data Parallel (FSDP) support (Experimental)\\n+\\n+## Citing 🤗 Accelerate\\n+\\n+If you use 🤗 Accelerate in your publication, please cite it by using the following BibTeX entry.\\n+\\n+```bibtex\\n+@Misc{accelerate,\\n+ title = {Accelerate: Training and inference at scale made simple, efficient and adaptable.},\\n+ author = {Sylvain Gugger, Lysandre Debut, Thomas Wolf, Philipp Schmid, Zachary Mueller, Sourab Mangrulkar},\\n+ howpublished = {\\\\url{https://github.com/huggingface/accelerate}},\\n+ year = {2022}\\n+}\\n+```\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/660'},\n", - " 1035923283: {'diff': 'diff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\\nindex 839a7a2a9..028e431e1 100644\\n--- a/src/accelerate/tracking.py\\n+++ b/src/accelerate/tracking.py\\n@@ -16,9 +16,12 @@\\n # Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}\\n \\n import os\\n+import time\\n from abc import ABCMeta, abstractmethod, abstractproperty\\n from typing import List, Optional, Union\\n \\n+import yaml\\n+\\n from .logging import get_logger\\n from .utils import LoggerType, is_comet_ml_available, is_tensorboard_available, is_wandb_available\\n \\n@@ -142,7 +145,8 @@ def tracker(self):\\n \\n def store_init_configuration(self, values: dict):\\n \"\"\"\\n- Logs `values` as hyperparameters for the run. 
Should be run at the beginning of your experiment.\\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. Stores the\\n+ hyperparameters in a yaml file for future use.\\n \\n Args:\\n values (Dictionary `str` to `bool`, `str`, `float` or `int`):\\n@@ -151,7 +155,16 @@ def store_init_configuration(self, values: dict):\\n \"\"\"\\n self.writer.add_hparams(values, metric_dict={})\\n self.writer.flush()\\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\\n+ project_run_name = time.time()\\n+ dir_name = os.path.join(self.logging_dir, str(project_run_name))\\n+ os.makedirs(dir_name, exist_ok=True)\\n+ with open(os.path.join(dir_name, \"hparams.yml\"), \"w\") as outfile:\\n+ try:\\n+ yaml.dump(values, outfile)\\n+ except yaml.representer.RepresenterError:\\n+ logger.error(\"Serialization to store hyperparameters failed\")\\n+ raise\\n+ logger.info(\"Stored initial configuration hyperparameters to TensorBoard and hparams yaml file\")\\n \\n def log(self, values: dict, step: Optional[int] = None, **kwargs):\\n \"\"\"\\n',\n", - " 'code_comments': [{'body': 'Perhaps it may be good to make use of the `logging_dir` to send the yaml to? Or should these be stored separately normally.\\r\\n\\r\\n(This part):\\r\\n```python\\r\\nwriter = tensorboard.SummaryWriter(self.logging_dir, **kwargs)\\r\\n```\\r\\n',\n", - " 'diff_hunk': '@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\\n \"\"\"\\n self.writer.add_hparams(values, metric_dict={})\\n self.writer.flush()\\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\\n+ with open(\"hparams.yaml\", \"w\") as outfile:',\n", - " 'from_author': False},\n", - " {'body': 'So, should I save the yaml file in the `logging_dir`?',\n", - " 'diff_hunk': '@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\\n \"\"\"\\n self.writer.add_hparams(values, metric_dict={})\\n self.writer.flush()\\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\\n+ with open(\"hparams.yaml\", \"w\") as outfile:',\n", - " 'from_author': True},\n", - " {'body': 'Yep, as this is where users would expect it to wind up landing. E.g\\r\\n\\r\\nlogging_dir/hparams.yaml',\n", - " 'diff_hunk': '@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\\n \"\"\"\\n self.writer.add_hparams(values, metric_dict={})\\n self.writer.flush()\\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\\n+ with open(\"hparams.yaml\", \"w\") as outfile:',\n", - " 'from_author': False},\n", - " {'body': 'Yeah makes sense. 
I have made the changes now.',\n", - " 'diff_hunk': '@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\\n \"\"\"\\n self.writer.add_hparams(values, metric_dict={})\\n self.writer.flush()\\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\\n+ with open(\"hparams.yaml\", \"w\") as outfile:',\n", - " 'from_author': True},\n", - " {'body': \"Would we want to use pathlib `Path(self.logging_dir) / 'hparams.yaml'` or `os.path.join(self.logging_dir, 'hparams.yaml')` instead here for sanity purposes?\",\n", - " 'diff_hunk': '@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\\n \"\"\"\\n self.writer.add_hparams(values, metric_dict={})\\n self.writer.flush()\\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\\n+ with open(\"self.logging_dir/hparams.yaml\", \"w\") as outfile:',\n", - " 'from_author': False},\n", - " {'body': 'Do we need to check if all keys/vals are serializable here, or are we going to let the error yaml throws be the one we present to users in this case? (not sure if this is done elsewhere in the codebase)',\n", - " 'diff_hunk': '@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\\n \"\"\"\\n self.writer.add_hparams(values, metric_dict={})\\n self.writer.flush()\\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\\n+ with open(\"self.logging_dir/hparams.yaml\", \"w\") as outfile:\\n+ yaml.dump(values, outfile)',\n", - " 'from_author': False},\n", - " {'body': \"We do that somewhat here: https://github.com/huggingface/accelerate/blob/main/src/accelerate/tracking.py#L170-L176\\r\\n\\r\\nMaybe a try/catch that alerts the user that non-serializable bits were tried to be stored? Not sure what the try/catch error would be for that, otherwise I'm open to it just throwing the yaml error if we cannot :) \",\n", - " 'diff_hunk': '@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\\n \"\"\"\\n self.writer.add_hparams(values, metric_dict={})\\n self.writer.flush()\\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\\n+ with open(\"self.logging_dir/hparams.yaml\", \"w\") as outfile:\\n+ yaml.dump(values, outfile)',\n", - " 'from_author': False},\n", - " {'body': 'could catch a `yaml.representer.RepresenterError` and return the message maybe?',\n", - " 'diff_hunk': '@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\\n \"\"\"\\n self.writer.add_hparams(values, metric_dict={})\\n self.writer.flush()\\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\\n+ with open(\"self.logging_dir/hparams.yaml\", \"w\") as outfile:\\n+ yaml.dump(values, outfile)',\n", - " 'from_author': False},\n", - " {'body': \"Along with more descriptive explanation of what happened under the hood accelerate...otherwise its the same as letting yaml handle it. \\r\\n\\r\\nI'm fine with either solution, just wanted to bring it up!\",\n", - " 'diff_hunk': '@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\\n \"\"\"\\n self.writer.add_hparams(values, metric_dict={})\\n self.writer.flush()\\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\\n+ with open(\"self.logging_dir/hparams.yaml\", \"w\") as outfile:\\n+ yaml.dump(values, outfile)',\n", - " 'from_author': False},\n", - " {'body': 'We should be careful here that `hparams.yaml` is saved separately for separate experiments. 
Should make sure that when you run 2 separate runs, you get 2 separate entries in the hparams dashboard of tensorboard. \\r\\n\\r\\nIf we save directly to `logging_dir/hparams.yaml` we would be overwriting every time, eh? ',\n", - " 'diff_hunk': '@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\\n \"\"\"\\n self.writer.add_hparams(values, metric_dict={})\\n self.writer.flush()\\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\\n+ with open(\"hparams.yaml\", \"w\") as outfile:',\n", - " 'from_author': False},\n", - " {'body': 'I have changed the hardcoded path here.',\n", - " 'diff_hunk': '@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\\n \"\"\"\\n self.writer.add_hparams(values, metric_dict={})\\n self.writer.flush()\\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\\n+ with open(\"self.logging_dir/hparams.yaml\", \"w\") as outfile:',\n", - " 'from_author': True},\n", - " {'body': 'I have added the try/catch error. ',\n", - " 'diff_hunk': '@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\\n \"\"\"\\n self.writer.add_hparams(values, metric_dict={})\\n self.writer.flush()\\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\\n+ with open(\"self.logging_dir/hparams.yaml\", \"w\") as outfile:\\n+ yaml.dump(values, outfile)',\n", - " 'from_author': True},\n", - " {'body': 'I am not sure where to give a descriptive explanation. Should I add it as a comment or should I define it in the PR?',\n", - " 'diff_hunk': '@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\\n \"\"\"\\n self.writer.add_hparams(values, metric_dict={})\\n self.writer.flush()\\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\\n+ with open(\"self.logging_dir/hparams.yaml\", \"w\") as outfile:\\n+ yaml.dump(values, outfile)',\n", - " 'from_author': True},\n", - " {'body': 'The default tensorboard summary writer uses time.time() to create a folder for every run. So, I added the same for hparams.yml file. 
Hope this is okay.',\n", - " 'diff_hunk': '@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\\n \"\"\"\\n self.writer.add_hparams(values, metric_dict={})\\n self.writer.flush()\\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\\n+ with open(\"self.logging_dir/hparams.yaml\", \"w\") as outfile:',\n", - " 'from_author': True},\n", - " {'body': 'We should change this to actually raise the error, otherwise this will silently fail.\\r\\n\\r\\n```suggestion\\r\\n except yaml.representer.RepresenterError:\\r\\n logger.error(\"Serialization to store hyperparameters failed\")\\r\\n raise\\r\\n```\\r\\nI think just doing `logger.error` here is fine enough',\n", - " 'diff_hunk': '@@ -151,7 +154,13 @@ def store_init_configuration(self, values: dict):\\n \"\"\"\\n self.writer.add_hparams(values, metric_dict={})\\n self.writer.flush()\\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\\n+ project_run_name = time.time()\\n+ with open(os.path.join(self.logging_dir, project_run_name, \"hparams.yml\"), \"w\") as outfile:\\n+ try:\\n+ yaml.dump(values, outfile)\\n+ except yaml.representer.RepresenterError:\\n+ logger.info(\"Serialization to store hyperparmeters failed\")',\n", - " 'from_author': False},\n", - " {'body': 'Doing so in the `logger.error` (see my suggestions) is good enough :) ',\n", - " 'diff_hunk': '@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\\n \"\"\"\\n self.writer.add_hparams(values, metric_dict={})\\n self.writer.flush()\\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\\n+ with open(\"self.logging_dir/hparams.yaml\", \"w\") as outfile:\\n+ yaml.dump(values, outfile)',\n", - " 'from_author': False},\n", - " {'body': 'Yes, I have made the changes.',\n", - " 'diff_hunk': '@@ -151,7 +154,13 @@ def store_init_configuration(self, values: dict):\\n \"\"\"\\n self.writer.add_hparams(values, metric_dict={})\\n self.writer.flush()\\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\\n+ project_run_name = time.time()\\n+ with open(os.path.join(self.logging_dir, project_run_name, \"hparams.yml\"), \"w\") as outfile:\\n+ try:\\n+ yaml.dump(values, outfile)\\n+ except yaml.representer.RepresenterError:\\n+ logger.info(\"Serialization to store hyperparmeters failed\")',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'cc @nateraw if you have any comments 🤗 ', 'from_author': False},\n", - " {'body': '@Shreyz-max cast `run_name` to a string on the yaml part and it should solve the error. E.g. `str(run_name)`\\r\\n\\r\\nGreat work on this, were almost there!',\n", - " 'from_author': False},\n", - " {'body': 'Yeah, sorry about the mistake. I did not notice this at all.',\n", - " 'from_author': True},\n", - " {'body': 'I suppose I need to create the folder.', 'from_author': True},\n", - " {'body': 'Great job! 
CC @nateraw for one last look, but lg2m!',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/657'},\n", - " 1035817822: {'diff': 'diff --git a/docs/source/concept_guides/gradient_synchronization.mdx b/docs/source/concept_guides/gradient_synchronization.mdx\\nindex 02b5adf0d..ea4de3d72 100644\\n--- a/docs/source/concept_guides/gradient_synchronization.mdx\\n+++ b/docs/source/concept_guides/gradient_synchronization.mdx\\n@@ -114,4 +114,6 @@ for batch in dataloader:\\n outputs = model(inputs)\\n loss = loss_function(outputs, targets)\\n accelerator.backward(loss)\\n-```\\n\\\\ No newline at end of file\\n+```\\n+\\n+As a result, you should either use *`accelerator.accumulate` or `accelerator.no_sync`* when it comes to API choice. \\n\\\\ No newline at end of file\\ndiff --git a/docs/source/usage_guides/gradient_accumulation.mdx b/docs/source/usage_guides/gradient_accumulation.mdx\\nindex bd06efed9..798e15847 100644\\n--- a/docs/source/usage_guides/gradient_accumulation.mdx\\n+++ b/docs/source/usage_guides/gradient_accumulation.mdx\\n@@ -126,3 +126,5 @@ for batch in training_dataloader:\\n scheduler.step()\\n optimizer.zero_grad()\\n ```\\n+\\n+To learn more about what magic this wraps around, read the [Gradient Synchronization concept guide](/concept_guides/gradient_synchronization)\\n\\\\ No newline at end of file\\n',\n", - " 'code_comments': [{'body': '```suggestion\\r\\nAs a result, you should either use *`accelerator.accumulate` or `accelerator.no_sync`* when it comes to API choice. \\r\\n```',\n", - " 'diff_hunk': '@@ -114,4 +114,6 @@ for batch in dataloader:\\n outputs = model(inputs)\\n loss = loss_function(outputs, targets)\\n accelerator.backward(loss)\\n-```\\n\\\\ No newline at end of file\\n+```\\n+\\n+As a result, you should either use *one or the other* when it comes to API choice. ',\n", - " 'from_author': False}],\n", - " 'context': [{'body': 'The docs for this PR live [here](/static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_656). 
All of your documentation changes will be reflected on that endpoint.',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/656'},\n", - " 1035655626: {'diff': 'diff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\\nindex 028e431e1..5f189c326 100644\\n--- a/src/accelerate/tracking.py\\n+++ b/src/accelerate/tracking.py\\n@@ -134,8 +134,8 @@ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]]\\n self.run_name = run_name\\n self.logging_dir = os.path.join(logging_dir, run_name)\\n self.writer = tensorboard.SummaryWriter(self.logging_dir, **kwargs)\\n- logger.info(f\"Initialized TensorBoard project {self.run_name} logging to {self.logging_dir}\")\\n- logger.info(\\n+ logger.debug(f\"Initialized TensorBoard project {self.run_name} logging to {self.logging_dir}\")\\n+ logger.debug(\\n \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\\n )\\n \\n@@ -164,7 +164,7 @@ def store_init_configuration(self, values: dict):\\n except yaml.representer.RepresenterError:\\n logger.error(\"Serialization to store hyperparameters failed\")\\n raise\\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard and hparams yaml file\")\\n+ logger.debug(\"Stored initial configuration hyperparameters to TensorBoard and hparams yaml file\")\\n \\n def log(self, values: dict, step: Optional[int] = None, **kwargs):\\n \"\"\"\\n@@ -188,14 +188,14 @@ def log(self, values: dict, step: Optional[int] = None, **kwargs):\\n elif isinstance(v, dict):\\n self.writer.add_scalars(k, v, global_step=step, **kwargs)\\n self.writer.flush()\\n- logger.info(\"Successfully logged to TensorBoard\")\\n+ logger.debug(\"Successfully logged to TensorBoard\")\\n \\n def finish(self):\\n \"\"\"\\n Closes `TensorBoard` writer\\n \"\"\"\\n self.writer.close()\\n- logger.info(\"TensorBoard writer closed\")\\n+ logger.debug(\"TensorBoard writer closed\")\\n \\n \\n class WandBTracker(GeneralTracker):\\n@@ -215,8 +215,8 @@ class WandBTracker(GeneralTracker):\\n def __init__(self, run_name: str, **kwargs):\\n self.run_name = run_name\\n self.run = wandb.init(project=self.run_name, **kwargs)\\n- logger.info(f\"Initialized WandB project {self.run_name}\")\\n- logger.info(\\n+ logger.debug(f\"Initialized WandB project {self.run_name}\")\\n+ logger.debug(\\n \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\\n )\\n \\n@@ -234,7 +234,7 @@ def store_init_configuration(self, values: dict):\\n `str`, `float`, `int`, or `None`.\\n \"\"\"\\n wandb.config.update(values)\\n- logger.info(\"Stored initial configuration hyperparameters to WandB\")\\n+ logger.debug(\"Stored initial configuration hyperparameters to WandB\")\\n \\n def log(self, values: dict, step: Optional[int] = None, **kwargs):\\n \"\"\"\\n@@ -250,14 +250,14 @@ def log(self, values: dict, step: Optional[int] = None, **kwargs):\\n Additional key word arguments passed along to the `wandb.log` method.\\n \"\"\"\\n self.run.log(values, step=step, **kwargs)\\n- logger.info(\"Successfully logged to WandB\")\\n+ logger.debug(\"Successfully logged to WandB\")\\n \\n def finish(self):\\n \"\"\"\\n Closes `wandb` writer\\n \"\"\"\\n self.run.finish()\\n- logger.info(\"WandB run closed\")\\n+ logger.debug(\"WandB run closed\")\\n \\n \\n class CometMLTracker(GeneralTracker):\\n@@ -279,8 +279,8 @@ class CometMLTracker(GeneralTracker):\\n def __init__(self, run_name: str, **kwargs):\\n 
self.run_name = run_name\\n self.writer = Experiment(project_name=run_name, **kwargs)\\n- logger.info(f\"Initialized CometML project {self.run_name}\")\\n- logger.info(\\n+ logger.debug(f\"Initialized CometML project {self.run_name}\")\\n+ logger.debug(\\n \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\\n )\\n \\n@@ -298,7 +298,7 @@ def store_init_configuration(self, values: dict):\\n `str`, `float`, `int`, or `None`.\\n \"\"\"\\n self.writer.log_parameters(values)\\n- logger.info(\"Stored initial configuration hyperparameters to CometML\")\\n+ logger.debug(\"Stored initial configuration hyperparameters to CometML\")\\n \\n def log(self, values: dict, step: Optional[int] = None, **kwargs):\\n \"\"\"\\n@@ -323,14 +323,14 @@ def log(self, values: dict, step: Optional[int] = None, **kwargs):\\n self.writer.log_other(k, v, **kwargs)\\n elif isinstance(v, dict):\\n self.writer.log_metrics(v, step=step, **kwargs)\\n- logger.info(\"Successfully logged to CometML\")\\n+ logger.debug(\"Successfully logged to CometML\")\\n \\n def finish(self):\\n \"\"\"\\n Closes `comet-ml` writer\\n \"\"\"\\n self.writer.end()\\n- logger.info(\"CometML run closed\")\\n+ logger.debug(\"CometML run closed\")\\n \\n \\n LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\\n@@ -384,6 +384,6 @@ def filter_trackers(\\n )\\n loggers.append(log_type)\\n else:\\n- logger.info(f\"Tried adding logger {log_type}, but package is unavailable in the system.\")\\n+ logger.debug(f\"Tried adding logger {log_type}, but package is unavailable in the system.\")\\n \\n return loggers\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/655'},\n", - " 1033234059: {'diff': 'diff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\\nindex 839a7a2a9..08b988250 100644\\n--- a/src/accelerate/tracking.py\\n+++ b/src/accelerate/tracking.py\\n@@ -20,7 +20,7 @@\\n from typing import List, Optional, Union\\n \\n from .logging import get_logger\\n-from .utils import LoggerType, is_comet_ml_available, is_tensorboard_available, is_wandb_available\\n+from .utils import LoggerType, is_aim_available, is_comet_ml_available, is_tensorboard_available, is_wandb_available\\n \\n \\n _available_trackers = []\\n@@ -40,6 +40,11 @@\\n \\n _available_trackers.append(LoggerType.COMETML)\\n \\n+if is_aim_available():\\n+ from aim import Run\\n+\\n+ _available_trackers.append(LoggerType.AIM)\\n+\\n \\n logger = get_logger(__name__)\\n \\n@@ -320,7 +325,72 @@ def finish(self):\\n logger.info(\"CometML run closed\")\\n \\n \\n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\\n+class AimTracker(GeneralTracker):\\n+ \"\"\"\\n+ A `Tracker` class that supports `aim`. 
Should be initialized at the start of your script.\\n+\\n+ Args:\\n+ run_name (`str`):\\n+ The name of the experiment run.\\n+ kwargs:\\n+ Additional key word arguments passed along to the `Run.__init__` method.\\n+ \"\"\"\\n+\\n+ name = \"aim\"\\n+ requires_logging_directory = True\\n+\\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):\\n+ self.run_name = run_name\\n+ self.writer = Run(repo=logging_dir, **kwargs)\\n+ self.writer.name = self.run_name\\n+ logger.debug(f\"Initialized Aim project {self.run_name}\")\\n+ logger.debug(\\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\\n+ )\\n+\\n+ @property\\n+ def tracker(self):\\n+ return self.writer\\n+\\n+ def store_init_configuration(self, values: dict):\\n+ \"\"\"\\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\\n+\\n+ Args:\\n+ values (`dict`):\\n+ Values to be stored as initial hyperparameters as key-value pairs.\\n+ \"\"\"\\n+ self.writer[\"hparams\"] = values\\n+\\n+ def log(self, values: dict, step: Optional[int], **kwargs):\\n+ \"\"\"\\n+ Logs `values` to the current run.\\n+\\n+ Args:\\n+ values (`dict`):\\n+ Values to be logged as key-value pairs.\\n+ step (`int`, *optional*):\\n+ The run step. If included, the log will be affiliated with this step.\\n+ kwargs:\\n+ Additional key word arguments passed along to the `Run.track` method.\\n+ \"\"\"\\n+ # Note: replace this with the dictionary support when merged\\n+ for key, value in values.items():\\n+ self.writer.track(value, name=key, step=step, **kwargs)\\n+\\n+ def finish(self):\\n+ \"\"\"\\n+ Closes `aim` writer\\n+ \"\"\"\\n+ self.writer.close()\\n+\\n+\\n+LOGGER_TYPE_TO_CLASS = {\\n+ \"aim\": AimTracker,\\n+ \"comet_ml\": CometMLTracker,\\n+ \"tensorboard\": TensorBoardTracker,\\n+ \"wandb\": WandBTracker,\\n+}\\n \\n \\n def filter_trackers(\\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\\nindex 2b8f566f0..ae5af1c3f 100644\\n--- a/src/accelerate/utils/__init__.py\\n+++ b/src/accelerate/utils/__init__.py\\n@@ -20,6 +20,7 @@\\n )\\n from .imports import (\\n get_ccl_version,\\n+ is_aim_available,\\n is_apex_available,\\n is_bf16_available,\\n is_boto3_available,\\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\\nindex 5f425f200..9f6e30bec 100644\\n--- a/src/accelerate/utils/dataclasses.py\\n+++ b/src/accelerate/utils/dataclasses.py\\n@@ -196,6 +196,7 @@ class LoggerType(BaseEnum):\\n \"\"\"\\n \\n ALL = \"all\"\\n+ AIM = \"aim\"\\n TENSORBOARD = \"tensorboard\"\\n WANDB = \"wandb\"\\n COMETML = \"comet_ml\"\\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\\nindex 074d02e4a..6239e3deb 100644\\n--- a/src/accelerate/utils/imports.py\\n+++ b/src/accelerate/utils/imports.py\\n@@ -93,6 +93,10 @@ def is_datasets_available():\\n return importlib.util.find_spec(\"datasets\") is not None\\n \\n \\n+def is_aim_available():\\n+ return importlib.util.find_spec(\"aim\") is not None\\n+\\n+\\n def is_tensorboard_available():\\n return importlib.util.find_spec(\"tensorboard\") is not None or importlib.util.find_spec(\"tensorboardX\") is not None\\n \\n',\n", - " 'code_comments': [{'body': '@muellerzr the `finalize` method is more for the SDK internal use (it handles storage indexing), which will become a private method in the upcoming releases.\\r\\nThe resources cleanup is done in `close` method, which should be used to 
properly finalize the run:\\r\\n```py\\r\\nself.writer.close()\\r\\n```',\n", - " 'diff_hunk': '@@ -320,7 +325,74 @@ def finish(self):\\n logger.info(\"CometML run closed\")\\n \\n \\n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\\n+class AimTracker(GeneralTracker):\\n+ \"\"\"\\n+ A `Tracker` class that supports `aim`. Should be initialized at the start of your script.\\n+\\n+ Args:\\n+ run_name (`str`):\\n+ The name of the experiment run.\\n+ kwargs:\\n+ Additional key word arguments passed along to the `Run.__init__` method.\\n+ \"\"\"\\n+\\n+ name = \"aim\"\\n+ requires_logging_directory = True\\n+\\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):\\n+ self.run_name = run_name\\n+ self.writer = Run(repo=logging_dir, experiment=run_name, **kwargs)\\n+ logger.info(f\"Initialized Aim project {self.run_name}\")\\n+ logger.info(\\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\\n+ )\\n+\\n+ @property\\n+ def tracker(self):\\n+ return self.writer\\n+\\n+ def store_init_configuration(self, values: dict):\\n+ \"\"\"\\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\\n+\\n+ Args:\\n+ values (`dict`):\\n+ Values to be stored as initial hyperparameters as key-value pairs.\\n+ \"\"\"\\n+ self.writer[\"hparams\"].update(values)\\n+\\n+ def log(self, values: dict, step: Optional[int], **kwargs):\\n+ \"\"\"\\n+ Logs `values` to the current run.\\n+\\n+ Args:\\n+ values (`dict`):\\n+ Values to be logged as key-value pairs.\\n+ step (`int`, *optional*):\\n+ The run step. If included, the log will be affiliated with this step.\\n+ kwargs (`dict`, *optional*):\\n+ Additional key word arguments passed along to the `Run.track` method. Valid keys include `context` and\\n+ `epoch`.\\n+ \"\"\"\\n+ # Note: replace this with the dictionary support when merged\\n+ context = kwargs.pop(\"context\", {})\\n+ epoch = kwargs.pop(\"epoch\", None)\\n+ for key, value in values.items():\\n+ self.writer.track(value, key, step=step, epoch=epoch, context=context)\\n+\\n+ def finish(self):\\n+ \"\"\"\\n+ Closes `aim` writer\\n+ \"\"\"\\n+ self.writer.finalize()',\n", - " 'from_author': False},\n", - " {'body': 'What about passing the name argument as a keyword argument? I guess it will be more readable and will minimize the risk of forward incompatibilities. Thoughts? 🤔 ',\n", - " 'diff_hunk': '@@ -320,7 +325,74 @@ def finish(self):\\n logger.info(\"CometML run closed\")\\n \\n \\n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\\n+class AimTracker(GeneralTracker):\\n+ \"\"\"\\n+ A `Tracker` class that supports `aim`. 
Should be initialized at the start of your script.\\n+\\n+ Args:\\n+ run_name (`str`):\\n+ The name of the experiment run.\\n+ kwargs:\\n+ Additional key word arguments passed along to the `Run.__init__` method.\\n+ \"\"\"\\n+\\n+ name = \"aim\"\\n+ requires_logging_directory = True\\n+\\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):\\n+ self.run_name = run_name\\n+ self.writer = Run(repo=logging_dir, experiment=run_name, **kwargs)\\n+ logger.info(f\"Initialized Aim project {self.run_name}\")\\n+ logger.info(\\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\\n+ )\\n+\\n+ @property\\n+ def tracker(self):\\n+ return self.writer\\n+\\n+ def store_init_configuration(self, values: dict):\\n+ \"\"\"\\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\\n+\\n+ Args:\\n+ values (`dict`):\\n+ Values to be stored as initial hyperparameters as key-value pairs.\\n+ \"\"\"\\n+ self.writer[\"hparams\"].update(values)\\n+\\n+ def log(self, values: dict, step: Optional[int], **kwargs):\\n+ \"\"\"\\n+ Logs `values` to the current run.\\n+\\n+ Args:\\n+ values (`dict`):\\n+ Values to be logged as key-value pairs.\\n+ step (`int`, *optional*):\\n+ The run step. If included, the log will be affiliated with this step.\\n+ kwargs (`dict`, *optional*):\\n+ Additional key word arguments passed along to the `Run.track` method. Valid keys include `context` and\\n+ `epoch`.\\n+ \"\"\"\\n+ # Note: replace this with the dictionary support when merged\\n+ context = kwargs.pop(\"context\", {})\\n+ epoch = kwargs.pop(\"epoch\", None)\\n+ for key, value in values.items():\\n+ self.writer.track(value, key, step=step, epoch=epoch, context=context)',\n", - " 'from_author': False},\n", - " {'body': 'The experiment is mainly used to group related runs together (e.g. often used in sweeps, hparams search). The experiment is not a mandatory argument, it defaults to `\"default\"`.\\r\\n\\r\\nI would recommend the `init` method to receive the repo path and the run name and pass the rest of arguments as kwargs to the Aim Run:\\r\\n```py\\r\\ndef __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):\\r\\n ...\\r\\n\\r\\n # Init the run - specify the logs dir and pass kwargs (including experiment) \\r\\n self.writer = Run(repo=logging_dir, **kwargs)\\r\\n\\r\\n # Assign the run name\\r\\n self.writer.name = run_name\\r\\n```\\r\\n\\r\\n',\n", - " 'diff_hunk': '@@ -320,7 +325,74 @@ def finish(self):\\n logger.info(\"CometML run closed\")\\n \\n \\n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\\n+class AimTracker(GeneralTracker):\\n+ \"\"\"\\n+ A `Tracker` class that supports `aim`. Should be initialized at the start of your script.\\n+\\n+ Args:\\n+ run_name (`str`):\\n+ The name of the experiment run.\\n+ kwargs:\\n+ Additional key word arguments passed along to the `Run.__init__` method.\\n+ \"\"\"\\n+\\n+ name = \"aim\"\\n+ requires_logging_directory = True\\n+\\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):',\n", - " 'from_author': False},\n", - " {'body': \"If you're worried about that, we can absolutely do so. 
Easy to achieve!\",\n", - " 'diff_hunk': '@@ -320,7 +325,74 @@ def finish(self):\\n logger.info(\"CometML run closed\")\\n \\n \\n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\\n+class AimTracker(GeneralTracker):\\n+ \"\"\"\\n+ A `Tracker` class that supports `aim`. Should be initialized at the start of your script.\\n+\\n+ Args:\\n+ run_name (`str`):\\n+ The name of the experiment run.\\n+ kwargs:\\n+ Additional key word arguments passed along to the `Run.__init__` method.\\n+ \"\"\"\\n+\\n+ name = \"aim\"\\n+ requires_logging_directory = True\\n+\\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):\\n+ self.run_name = run_name\\n+ self.writer = Run(repo=logging_dir, experiment=run_name, **kwargs)\\n+ logger.info(f\"Initialized Aim project {self.run_name}\")\\n+ logger.info(\\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\\n+ )\\n+\\n+ @property\\n+ def tracker(self):\\n+ return self.writer\\n+\\n+ def store_init_configuration(self, values: dict):\\n+ \"\"\"\\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\\n+\\n+ Args:\\n+ values (`dict`):\\n+ Values to be stored as initial hyperparameters as key-value pairs.\\n+ \"\"\"\\n+ self.writer[\"hparams\"].update(values)\\n+\\n+ def log(self, values: dict, step: Optional[int], **kwargs):\\n+ \"\"\"\\n+ Logs `values` to the current run.\\n+\\n+ Args:\\n+ values (`dict`):\\n+ Values to be logged as key-value pairs.\\n+ step (`int`, *optional*):\\n+ The run step. If included, the log will be affiliated with this step.\\n+ kwargs (`dict`, *optional*):\\n+ Additional key word arguments passed along to the `Run.track` method. Valid keys include `context` and\\n+ `epoch`.\\n+ \"\"\"\\n+ # Note: replace this with the dictionary support when merged\\n+ context = kwargs.pop(\"context\", {})\\n+ epoch = kwargs.pop(\"epoch\", None)\\n+ for key, value in values.items():\\n+ self.writer.track(value, key, step=step, epoch=epoch, context=context)',\n", - " 'from_author': True},\n", - " {'body': \"Interesting. I've been doing it the opposite way in my experiments. So e.g. if I have 3 trials of X hyperparameter, I'd just name it `run_hparam_x` *not* `run_hyparam_x_1`?\",\n", - " 'diff_hunk': '@@ -320,7 +325,74 @@ def finish(self):\\n logger.info(\"CometML run closed\")\\n \\n \\n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\\n+class AimTracker(GeneralTracker):\\n+ \"\"\"\\n+ A `Tracker` class that supports `aim`. 
Should be initialized at the start of your script.\\n+\\n+ Args:\\n+ run_name (`str`):\\n+ The name of the experiment run.\\n+ kwargs:\\n+ Additional key word arguments passed along to the `Run.__init__` method.\\n+ \"\"\"\\n+\\n+ name = \"aim\"\\n+ requires_logging_directory = True\\n+\\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):',\n", - " 'from_author': True},\n", - " {'body': \"@gorarakelyan I'd recommend updating this example, as this is where I got it from :) Will use close\\r\\n\\r\\nhttps://colab.research.google.com/drive/14rIAjpEyklf5fSMiRbyZs6iYG7IVibcI?usp=sharing#scrollTo=qOu7icths6n5\",\n", - " 'diff_hunk': '@@ -320,7 +325,74 @@ def finish(self):\\n logger.info(\"CometML run closed\")\\n \\n \\n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\\n+class AimTracker(GeneralTracker):\\n+ \"\"\"\\n+ A `Tracker` class that supports `aim`. Should be initialized at the start of your script.\\n+\\n+ Args:\\n+ run_name (`str`):\\n+ The name of the experiment run.\\n+ kwargs:\\n+ Additional key word arguments passed along to the `Run.__init__` method.\\n+ \"\"\"\\n+\\n+ name = \"aim\"\\n+ requires_logging_directory = True\\n+\\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):\\n+ self.run_name = run_name\\n+ self.writer = Run(repo=logging_dir, experiment=run_name, **kwargs)\\n+ logger.info(f\"Initialized Aim project {self.run_name}\")\\n+ logger.info(\\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\\n+ )\\n+\\n+ @property\\n+ def tracker(self):\\n+ return self.writer\\n+\\n+ def store_init_configuration(self, values: dict):\\n+ \"\"\"\\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\\n+\\n+ Args:\\n+ values (`dict`):\\n+ Values to be stored as initial hyperparameters as key-value pairs.\\n+ \"\"\"\\n+ self.writer[\"hparams\"].update(values)\\n+\\n+ def log(self, values: dict, step: Optional[int], **kwargs):\\n+ \"\"\"\\n+ Logs `values` to the current run.\\n+\\n+ Args:\\n+ values (`dict`):\\n+ Values to be logged as key-value pairs.\\n+ step (`int`, *optional*):\\n+ The run step. If included, the log will be affiliated with this step.\\n+ kwargs (`dict`, *optional*):\\n+ Additional key word arguments passed along to the `Run.track` method. Valid keys include `context` and\\n+ `epoch`.\\n+ \"\"\"\\n+ # Note: replace this with the dictionary support when merged\\n+ context = kwargs.pop(\"context\", {})\\n+ epoch = kwargs.pop(\"epoch\", None)\\n+ for key, value in values.items():\\n+ self.writer.track(value, key, step=step, epoch=epoch, context=context)\\n+\\n+ def finish(self):\\n+ \"\"\"\\n+ Closes `aim` writer\\n+ \"\"\"\\n+ self.writer.finalize()',\n", - " 'from_author': True},\n", - " {'body': '@muellerzr Yup, makes sense. What about assigning trial hashes (or ids) to run names and the sweep name to experiment? 
Like this:\\r\\n\\r\\n| Run name | Experiment |\\r\\n| --- | --- |\\r\\n| trial_1 | run_hparam_x |\\r\\n| trial_2 | run_hparam_x |\\r\\n| trial_3 | run_hparam_x |\\r\\n| --- | --- |\\r\\n| trial_1 | run_hparam_y |\\r\\n| trial_2 | run_hparam_y |\\r\\n| trial_3 | run_hparam_y |',\n", - " 'diff_hunk': '@@ -320,7 +325,74 @@ def finish(self):\\n logger.info(\"CometML run closed\")\\n \\n \\n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\\n+class AimTracker(GeneralTracker):\\n+ \"\"\"\\n+ A `Tracker` class that supports `aim`. Should be initialized at the start of your script.\\n+\\n+ Args:\\n+ run_name (`str`):\\n+ The name of the experiment run.\\n+ kwargs:\\n+ Additional key word arguments passed along to the `Run.__init__` method.\\n+ \"\"\"\\n+\\n+ name = \"aim\"\\n+ requires_logging_directory = True\\n+\\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):',\n", - " 'from_author': False},\n", - " {'body': \"Sounds good! I'll switch it to this as that makes perfect sense. \",\n", - " 'diff_hunk': '@@ -320,7 +325,74 @@ def finish(self):\\n logger.info(\"CometML run closed\")\\n \\n \\n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\\n+class AimTracker(GeneralTracker):\\n+ \"\"\"\\n+ A `Tracker` class that supports `aim`. Should be initialized at the start of your script.\\n+\\n+ Args:\\n+ run_name (`str`):\\n+ The name of the experiment run.\\n+ kwargs:\\n+ Additional key word arguments passed along to the `Run.__init__` method.\\n+ \"\"\"\\n+\\n+ name = \"aim\"\\n+ requires_logging_directory = True\\n+\\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):',\n", - " 'from_author': True},\n", - " {'body': '@muellerzr The last minor thing - the `run_hash` is a unique auto-generated id of a run. It would be better to store the name as a `run.name`. Like this:\\r\\n```py\\r\\nself.writer = Run(repo=logging_dir, **kwargs)\\r\\nself.writer.name = run_name\\r\\n```',\n", - " 'diff_hunk': '@@ -320,7 +325,71 @@ def finish(self):\\n logger.info(\"CometML run closed\")\\n \\n \\n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\\n+class AimTracker(GeneralTracker):\\n+ \"\"\"\\n+ A `Tracker` class that supports `aim`. Should be initialized at the start of your script.\\n+\\n+ Args:\\n+ run_name (`str`):\\n+ The name of the experiment run.\\n+ kwargs:\\n+ Additional key word arguments passed along to the `Run.__init__` method.\\n+ \"\"\"\\n+\\n+ name = \"aim\"\\n+ requires_logging_directory = True\\n+\\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):\\n+ self.run_name = run_name\\n+ self.writer = Run(run_hash=run_name, repo=logging_dir, **kwargs)',\n", - " 'from_author': False},\n", - " {'body': 'Fixed, thanks!',\n", - " 'diff_hunk': '@@ -320,7 +325,71 @@ def finish(self):\\n logger.info(\"CometML run closed\")\\n \\n \\n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\\n+class AimTracker(GeneralTracker):\\n+ \"\"\"\\n+ A `Tracker` class that supports `aim`. 
Should be initialized at the start of your script.\\n+\\n+ Args:\\n+ run_name (`str`):\\n+ The name of the experiment run.\\n+ kwargs:\\n+ Additional key word arguments passed along to the `Run.__init__` method.\\n+ \"\"\"\\n+\\n+ name = \"aim\"\\n+ requires_logging_directory = True\\n+\\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):\\n+ self.run_name = run_name\\n+ self.writer = Run(run_hash=run_name, repo=logging_dir, **kwargs)',\n", - " 'from_author': True},\n", - " {'body': 'Found one issue during testing. As Aim Run doesn\\'t have an `update` method, let\\'s just use a simple assignment here:\\r\\n```py\\r\\nself.writer[\"hparams\"] = values\\r\\n```',\n", - " 'diff_hunk': '@@ -320,7 +325,72 @@ def finish(self):\\n logger.info(\"CometML run closed\")\\n \\n \\n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\\n+class AimTracker(GeneralTracker):\\n+ \"\"\"\\n+ A `Tracker` class that supports `aim`. Should be initialized at the start of your script.\\n+\\n+ Args:\\n+ run_name (`str`):\\n+ The name of the experiment run.\\n+ kwargs:\\n+ Additional key word arguments passed along to the `Run.__init__` method.\\n+ \"\"\"\\n+\\n+ name = \"aim\"\\n+ requires_logging_directory = True\\n+\\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):\\n+ self.run_name = run_name\\n+ self.writer = Run(repo=logging_dir, **kwargs)\\n+ self.writer.name = self.run_name\\n+ logger.info(f\"Initialized Aim project {self.run_name}\")\\n+ logger.info(\\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\\n+ )\\n+\\n+ @property\\n+ def tracker(self):\\n+ return self.writer\\n+\\n+ def store_init_configuration(self, values: dict):\\n+ \"\"\"\\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\\n+\\n+ Args:\\n+ values (`dict`):\\n+ Values to be stored as initial hyperparameters as key-value pairs.\\n+ \"\"\"\\n+ self.writer[\"hparams\"].update(values)',\n", - " 'from_author': False},\n", - " {'body': 'Makes sense, will update with this. Thanks for checking!',\n", - " 'diff_hunk': '@@ -320,7 +325,72 @@ def finish(self):\\n logger.info(\"CometML run closed\")\\n \\n \\n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\\n+class AimTracker(GeneralTracker):\\n+ \"\"\"\\n+ A `Tracker` class that supports `aim`. Should be initialized at the start of your script.\\n+\\n+ Args:\\n+ run_name (`str`):\\n+ The name of the experiment run.\\n+ kwargs:\\n+ Additional key word arguments passed along to the `Run.__init__` method.\\n+ \"\"\"\\n+\\n+ name = \"aim\"\\n+ requires_logging_directory = True\\n+\\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):\\n+ self.run_name = run_name\\n+ self.writer = Run(repo=logging_dir, **kwargs)\\n+ self.writer.name = self.run_name\\n+ logger.info(f\"Initialized Aim project {self.run_name}\")\\n+ logger.info(\\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\\n+ )\\n+\\n+ @property\\n+ def tracker(self):\\n+ return self.writer\\n+\\n+ def store_init_configuration(self, values: dict):\\n+ \"\"\"\\n+ Logs `values` as hyperparameters for the run. 
Should be run at the beginning of your experiment.\\n+\\n+ Args:\\n+ values (`dict`):\\n+ Values to be stored as initial hyperparameters as key-value pairs.\\n+ \"\"\"\\n+ self.writer[\"hparams\"].update(values)',\n", - " 'from_author': True},\n", - " {'body': 'Now that #655 was merged, this should be adapted.',\n", - " 'diff_hunk': '@@ -320,7 +325,72 @@ def finish(self):\\n logger.info(\"CometML run closed\")\\n \\n \\n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\\n+class AimTracker(GeneralTracker):\\n+ \"\"\"\\n+ A `Tracker` class that supports `aim`. Should be initialized at the start of your script.\\n+\\n+ Args:\\n+ run_name (`str`):\\n+ The name of the experiment run.\\n+ kwargs:\\n+ Additional key word arguments passed along to the `Run.__init__` method.\\n+ \"\"\"\\n+\\n+ name = \"aim\"\\n+ requires_logging_directory = True\\n+\\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):\\n+ self.run_name = run_name\\n+ self.writer = Run(repo=logging_dir, **kwargs)\\n+ self.writer.name = self.run_name\\n+ logger.info(f\"Initialized Aim project {self.run_name}\")\\n+ logger.info(',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': '@muellerzr the tracker looks great. Is there any example training script, which I can run to test out the integration? Thanks a million!',\n", - " 'from_author': False},\n", - " {'body': '@gorarakelyan you can follow the directions here for installing the git version of accelerate + configuring it for your system:\\r\\nhttps://huggingface.co/docs/accelerate/basic_tutorials/install\\r\\n\\r\\nAnd then feel free to run the `nlp_example.py` script found in the examples directory here: https://github.com/huggingface/accelerate/blob/main/examples/nlp_example.py\\r\\n\\r\\nJust make sure to also install `transformers` and `datasets` first :) ',\n", - " 'from_author': True},\n", - " {'body': '@muellerzr thanks, will test it out soon! 🤗 ',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/649'},\n", - " 1031721951: {'diff': 'diff --git a/README.md b/README.md\\nindex c67d5dd50..f15f442a7 100644\\n--- a/README.md\\n+++ b/README.md\\n@@ -196,7 +196,7 @@ from accelerate import notebook_launcher\\n notebook_launcher(training_function)\\n ```\\n \\n-An example can be found in [this notebook](https://github.com/huggingface/notebooks/blob/master/examples/accelerate/simple_nlp_example.ipynb). [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/accelerate/simple_nlp_example.ipynb)\\n+An example can be found in [this notebook](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb). 
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb)\\n \\n ## Why should I use 🤗 Accelerate?\\n \\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/645'},\n", - " 1031255408: {'diff': 'diff --git a/examples/by_feature/deepspeed_with_config_support.py b/examples/by_feature/deepspeed_with_config_support.py\\nindex fd8a8fa82..89234a911 100755\\n--- a/examples/by_feature/deepspeed_with_config_support.py\\n+++ b/examples/by_feature/deepspeed_with_config_support.py\\n@@ -588,14 +588,12 @@ def group_texts(examples):\\n checkpointing_steps = None\\n \\n # We need to initialize the trackers we use, and also store our configuration.\\n- # We initialize the trackers only on main process because `accelerator.log`\\n- # only logs on main process and we don\\'t want empty logs/runs on other processes.\\n+ # The trackers initializes automatically on the main process.\\n if args.with_tracking:\\n- if accelerator.is_main_process:\\n- experiment_config = vars(args)\\n- # TensorBoard cannot log Enums, need the raw value\\n- experiment_config[\"lr_scheduler_type\"] = experiment_config[\"lr_scheduler_type\"].value\\n- accelerator.init_trackers(\"clm_no_trainer\", experiment_config)\\n+ experiment_config = vars(args)\\n+ # TensorBoard cannot log Enums, need the raw value\\n+ experiment_config[\"lr_scheduler_type\"] = experiment_config[\"lr_scheduler_type\"].value\\n+ accelerator.init_trackers(\"clm_no_trainer\", experiment_config)\\n \\n # Train!\\n total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\\ndiff --git a/examples/by_feature/fsdp_with_peak_mem_tracking.py b/examples/by_feature/fsdp_with_peak_mem_tracking.py\\nindex 274ccdfce..4ae8e917e 100644\\n--- a/examples/by_feature/fsdp_with_peak_mem_tracking.py\\n+++ b/examples/by_feature/fsdp_with_peak_mem_tracking.py\\n@@ -113,7 +113,7 @@ def training_function(config, args):\\n batch_size = int(config[\"batch_size\"])\\n \\n # We need to initialize the trackers we use, and also store our configuration\\n- if args.with_tracking and accelerator.is_main_process:\\n+ if args.with_tracking:\\n experiment_config = vars(args)\\n accelerator.init_trackers(\"fsdp_glue_no_trainer\", experiment_config)\\n \\ndiff --git a/examples/by_feature/tracking.py b/examples/by_feature/tracking.py\\nindex 76ad7a64b..e4467697c 100644\\n--- a/examples/by_feature/tracking.py\\n+++ b/examples/by_feature/tracking.py\\n@@ -166,8 +166,8 @@ def training_function(config, args):\\n )\\n \\n # New Code #\\n- # We need to initalize the trackers we use. Overall configurations can also be stored\\n- if args.with_tracking and accelerator.is_main_process:\\n+ # We need to initialize the trackers we use. 
Overall configurations can also be stored\\n+ if args.with_tracking:\\n run = os.path.split(__file__)[-1].split(\".\")[0]\\n accelerator.init_trackers(run, config)\\n \\ndiff --git a/examples/complete_cv_example.py b/examples/complete_cv_example.py\\nindex 1b86e66ee..880981594 100644\\n--- a/examples/complete_cv_example.py\\n+++ b/examples/complete_cv_example.py\\n@@ -103,7 +103,7 @@ def training_function(config, args):\\n checkpointing_steps = None\\n \\n # We need to initialize the trackers we use, and also store our configuration\\n- if args.with_tracking and accelerator.is_main_process:\\n+ if args.with_tracking:\\n run = os.path.split(__file__)[-1].split(\".\")[0]\\n accelerator.init_trackers(run, config)\\n \\ndiff --git a/examples/complete_nlp_example.py b/examples/complete_nlp_example.py\\nindex 93e6764f6..559a5c963 100644\\n--- a/examples/complete_nlp_example.py\\n+++ b/examples/complete_nlp_example.py\\n@@ -75,7 +75,7 @@ def training_function(config, args):\\n batch_size = int(config[\"batch_size\"])\\n \\n # We need to initialize the trackers we use, and also store our configuration\\n- if args.with_tracking and accelerator.is_main_process:\\n+ if args.with_tracking:\\n run = os.path.split(__file__)[-1].split(\".\")[0]\\n accelerator.init_trackers(run, config)\\n \\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'Thanks! Do you want to try to update the complete examples in this too? (The last spot would be in the docs, but I can update that :) )',\n", - " 'from_author': False},\n", - " {'body': 'Sure, I will update all the example scripts in the `examples` folder in this PR :)',\n", - " 'from_author': True},\n", - " {'body': '@Gladiator07 great! It should just be the complete_nlp_example and complete_cv_example',\n", - " 'from_author': False},\n", - " {'body': '@muellerzr but many of the example scripts have the `is_main_process` check (for instance [here](https://github.com/huggingface/accelerate/blob/a3d94916a87d07471865261d015128870f513c38/examples/by_feature/deepspeed_with_config_support.py#L594)). Should I update them as well or just `complete_nlp_example` and `complete_cv_example` ?',\n", - " 'from_author': True},\n", - " {'body': 'Ah, great point! Yes in that case those would be great to have done as well! :)',\n", - " 'from_author': False},\n", - " {'body': 'Hi @muellerzr, I have removed the `is_main_process` check from all example scripts. Please check it once.',\n", - " 'from_author': True}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/643'},\n", - " 1031120240: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex 8261524fd..4991ddbe4 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -1032,6 +1032,7 @@ def wait_for_everyone(self):\\n \"\"\"\\n wait_for_everyone()\\n \\n+ @on_main_process\\n def init_trackers(self, project_name: str, config: Optional[dict] = None, init_kwargs: Optional[dict] = {}):\\n \"\"\"\\n Initializes a run for all trackers stored in `self.log_with`, potentially with starting configurations\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': '@Gladiator07 could you adjust the following code in the examples as well? 
Since this removes the need for the check:\\r\\n\\r\\nhttps://github.com/huggingface/accelerate/blob/main/examples/by_feature/tracking.py#L168-L172',\n", - " 'from_author': False},\n", - " {'body': '@muellerzr adjust as in? Should I remove the `accelerator.is_main_process` check?',\n", - " 'from_author': True},\n", - " {'body': \"@Gladiator07 correct. We're putting this in place so that is no longer needed, so the examples should show this :)\",\n", - " 'from_author': False},\n", - " {'body': 'Cool, doing this in a separate PR.', 'from_author': True}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/642'},\n", - " 1026629204: {'diff': 'diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\\nindex 806f0094d..3fec513d2 100644\\n--- a/.github/workflows/test.yml\\n+++ b/.github/workflows/test.yml\\n@@ -11,6 +11,10 @@ jobs:\\n runs-on: ubuntu-latest\\n strategy:\\n matrix:\\n+ pytorch-version: [\\n+ latest,\\n+ minimum\\n+ ]\\n test-kind: [\\n test_prod,\\n test_core,\\n@@ -43,6 +47,7 @@ jobs:\\n if [[ ${{ matrix.test-kind }} = test_prod ]]; then pip install -e .[test_prod]; fi\\n if [[ ${{ matrix.test-kind }} != test_prod ]]; then pip install -e .[testing,test_trackers]; fi\\n if [[ ${{ matrix.test-kind }} = test_rest ]]; then pip uninstall comet_ml -y; fi\\n+ if [[ ${{ matrix.pytorch-version }} = minimum ]]; then pip install torch==1.6.0; fi\\n \\n - name: Run Tests\\n run: |\\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex 353ccf894..27db19b9b 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -279,9 +279,7 @@ def __init__(\\n self.native_amp = False\\n err = \"{mode} mixed precision requires {requirement}\"\\n if self.state.mixed_precision == \"fp16\":\\n- self.native_amp = is_torch_version(\">=\", \"1.6\")\\n- if not self.native_amp:\\n- raise ValueError(err.format(mode=\"fp16\", requirement=\"PyTorch >= 1.6\"))\\n+ self.native_amp = True\\n if not torch.cuda.is_available() and not parse_flag_from_env(\"USE_MPS_DEVICE\"):\\n raise ValueError(err.format(mode=\"fp16\", requirement=\"a GPU\"))\\n kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}\\n@@ -314,7 +312,7 @@ def __init__(\\n # RNG Types\\n self.rng_types = rng_types\\n if self.rng_types is None:\\n- self.rng_types = [\"torch\"] if is_torch_version(\"<=\", \"1.5.1\") else [\"generator\"]\\n+ self.rng_types = [\"generator\"]\\n \\n @property\\n def use_distributed(self):\\ndiff --git a/src/accelerate/big_modeling.py b/src/accelerate/big_modeling.py\\nindex 311111ad4..6643fa579 100644\\n--- a/src/accelerate/big_modeling.py\\n+++ b/src/accelerate/big_modeling.py\\n@@ -29,6 +29,7 @@\\n load_checkpoint_in_model,\\n offload_state_dict,\\n )\\n+from .utils.versions import is_torch_version\\n \\n \\n @contextmanager\\n@@ -59,6 +60,8 @@ def init_empty_weights(include_buffers: bool = False):\\n \\n \\n \"\"\"\\n+ if not is_torch_version(\">=\", \"1.9.0\"):\\n+ raise NotImplementedError(\"Initializing empty weights to a meta device requires torch >= 1.9.0\")\\n old_register_parameter = nn.Module.register_parameter\\n if include_buffers:\\n old_register_buffer = nn.Module.register_buffer\\n@@ -114,6 +117,8 @@ def cpu_offload(\\n called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,\\n `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.\\n \"\"\"\\n+ if not is_torch_version(\">=\", \"1.9.0\"):\\n+ raise 
NotImplementedError(\"CPU offloading requires torch >= 1.9.0\")\\n if execution_device is None:\\n execution_device = next(iter(model.parameters())).device\\n if state_dict is None:\\n@@ -157,6 +162,8 @@ def disk_offload(\\n called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,\\n `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.\\n \"\"\"\\n+ if not is_torch_version(\">=\", \"1.9.0\"):\\n+ raise NotImplementedError(\"Disk offloading requires torch >= 1.9.0\")\\n if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, \"index.json\")):\\n offload_state_dict(offload_dir, model.state_dict())\\n if execution_device is None:\\n@@ -208,6 +215,8 @@ def dispatch_model(\\n called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,\\n `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.\\n \"\"\"\\n+ if not is_torch_version(\">=\", \"1.9.0\"):\\n+ raise NotImplementedError(\"Model dispatching requires torch >= 1.9.0\")\\n # Error early if the device map is incomplete.\\n check_device_map(model, device_map)\\n \\n@@ -304,6 +313,8 @@ def load_checkpoint_and_dispatch(\\n called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,\\n `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.\\n \"\"\"\\n+ if not is_torch_version(\">=\", \"1.9.0\"):\\n+ raise NotImplementedError(\"Loading and dispatching requires torch >= 1.9.0\")\\n if isinstance(device_map, str) and device_map not in [\"auto\", \"balanced\", \"balanced_low_0\", \"sequential\"]:\\n raise ValueError(\\n \"If passing a string for `device_map`, please choose \\'auto\\', \\'balanced\\', \\'balanced_low_0\\' or \"\\ndiff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\\nindex b8152b302..9236ee310 100644\\n--- a/src/accelerate/data_loader.py\\n+++ b/src/accelerate/data_loader.py\\n@@ -75,11 +75,11 @@ def total_dataset_length(self):\\n \"timeout\": 0,\\n \"worker_init_fn\": None,\\n \"multiprocessing_context\": None,\\n+ \"generator\": None,\\n }\\n \\n # kwargs added after by version\\n _PYTORCH_DATALOADER_ADDITIONAL_KWARGS = {\\n- \"1.6.0\": {\"generator\": None},\\n \"1.7.0\": {\"prefetch_factor\": 2, \"persistent_workers\": False},\\n }\\n \\n@@ -412,7 +412,7 @@ def __init__(self, dataset, split_batches: bool = False, _drop_last: bool = Fals\\n self.split_batches = split_batches\\n if is_torch_version(\"<\", \"1.8.0\"):\\n raise ImportError(\\n- \"Using `DataLoaderDispatcher` requires PyTorch 1.8.0 minimum. You have {torch.__version__}.\"\\n+ f\"Using `DataLoaderDispatcher` requires PyTorch 1.8.0 minimum. 
You have {torch.__version__}.\"\\n )\\n if shuffle:\\n torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle)\\ndiff --git a/src/accelerate/launchers.py b/src/accelerate/launchers.py\\nindex 8ddc9af34..dcdead956 100644\\n--- a/src/accelerate/launchers.py\\n+++ b/src/accelerate/launchers.py\\n@@ -20,7 +20,7 @@\\n import torch\\n \\n from .state import AcceleratorState\\n-from .utils import PrecisionType, PrepareForLaunch, is_torch_version, patch_environment\\n+from .utils import PrecisionType, PrepareForLaunch, patch_environment\\n \\n \\n def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mixed_precision=\"no\", use_port=\"29500\"):\\n@@ -90,12 +90,6 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mix\\n \\n if num_processes > 1:\\n # Multi-GPU launch\\n- if is_torch_version(\"<\", \"1.5.0\"):\\n- raise ImportError(\\n- \"Using `notebook_launcher` for distributed training on GPUs require torch >= 1.5.0, got \"\\n- f\"{torch.__version__}.\"\\n- )\\n-\\n from torch.multiprocessing import start_processes\\n \\n if len(AcceleratorState._shared_state) > 0:\\n@@ -154,12 +148,6 @@ def debug_launcher(function, args=(), num_processes=2):\\n num_processes (`int`, *optional*, defaults to 2):\\n The number of processes to use for training.\\n \"\"\"\\n- if is_torch_version(\"<\", \"1.5.0\"):\\n- raise ImportError(\\n- \"Using `debug_launcher` for distributed training on GPUs require torch >= 1.5.0, got \"\\n- f\"{torch.__version__}.\"\\n- )\\n-\\n from torch.multiprocessing import start_processes\\n \\n with tempfile.NamedTemporaryFile() as tmp_file:\\ndiff --git a/src/accelerate/test_utils/__init__.py b/src/accelerate/test_utils/__init__.py\\nindex 43d5ed7ef..faf8a7da8 100644\\n--- a/src/accelerate/test_utils/__init__.py\\n+++ b/src/accelerate/test_utils/__init__.py\\n@@ -10,6 +10,7 @@\\n require_huggingface_suite,\\n require_multi_gpu,\\n require_single_gpu,\\n+ require_torch_min_version,\\n require_tpu,\\n skip,\\n slow,\\ndiff --git a/src/accelerate/test_utils/scripts/test_script.py b/src/accelerate/test_utils/scripts/test_script.py\\nindex 07d794b42..6897d9084 100644\\n--- a/src/accelerate/test_utils/scripts/test_script.py\\n+++ b/src/accelerate/test_utils/scripts/test_script.py\\n@@ -46,10 +46,9 @@ def rng_sync_check():\\n if state.distributed_type == DistributedType.MULTI_GPU:\\n synchronize_rng_states([\"cuda\"])\\n assert are_the_same_tensors(torch.cuda.get_rng_state()), \"RNG states improperly synchronized on GPU.\"\\n- if is_torch_version(\">=\", \"1.6.0\"):\\n- generator = torch.Generator()\\n- synchronize_rng_states([\"generator\"], generator=generator)\\n- assert are_the_same_tensors(generator.get_state()), \"RNG states improperly synchronized in generator.\"\\n+ generator = torch.Generator()\\n+ synchronize_rng_states([\"generator\"], generator=generator)\\n+ assert are_the_same_tensors(generator.get_state()), \"RNG states improperly synchronized in generator.\"\\n \\n if state.local_process_index == 0:\\n print(\"All rng are properly synched.\")\\n@@ -339,7 +338,7 @@ def main():\\n if state.local_process_index == 0:\\n print(\"\\\\n**DataLoader integration test**\")\\n dl_preparation_check()\\n- if state.distributed_type != DistributedType.TPU:\\n+ if state.distributed_type != DistributedType.TPU and is_torch_version(\">=\", \"1.8.0\"):\\n central_dl_preparation_check()\\n \\n # Trainings are not exactly the same in DeepSpeed and CPU mode\\ndiff --git a/src/accelerate/test_utils/testing.py 
b/src/accelerate/test_utils/testing.py\\nindex 381f92c58..94e13412a 100644\\n--- a/src/accelerate/test_utils/testing.py\\n+++ b/src/accelerate/test_utils/testing.py\\n@@ -20,6 +20,7 @@\\n import tempfile\\n import unittest\\n from distutils.util import strtobool\\n+from functools import partial\\n from pathlib import Path\\n from typing import List, Union\\n from unittest import mock\\n@@ -132,6 +133,16 @@ def require_fsdp(test_case):\\n return unittest.skipUnless(is_torch_version(\">=\", \"1.12.0\"), \"test requires torch version >= 1.12.0\")(test_case)\\n \\n \\n+def require_torch_min_version(test_case=None, version=None):\\n+ \"\"\"\\n+ Decorator marking that a test requires a particular torch version to be tested. These tests are skipped when an\\n+ installed torch version is less than the required one.\\n+ \"\"\"\\n+ if test_case is None:\\n+ return partial(require_torch_min_version, version=version)\\n+ return unittest.skipUnless(is_torch_version(\">=\", version), f\"test requires torch version >= {version}\")(test_case)\\n+\\n+\\n def require_tensorboard(test_case):\\n \"\"\"\\n Decorator marking a test that requires tensorboard installed. These tests are skipped when tensorboard isn\\'t\\ndiff --git a/tests/test_big_modeling.py b/tests/test_big_modeling.py\\nindex 4f3054b73..4f738f4d8 100644\\n--- a/tests/test_big_modeling.py\\n+++ b/tests/test_big_modeling.py\\n@@ -27,7 +27,7 @@\\n load_checkpoint_and_dispatch,\\n )\\n from accelerate.hooks import remove_hook_from_submodules\\n-from accelerate.test_utils import require_cuda, require_multi_gpu, slow\\n+from accelerate.test_utils import require_cuda, require_multi_gpu, require_torch_min_version, slow\\n from accelerate.utils import offload_state_dict\\n from transformers import AutoModelForCausalLM, AutoTokenizer\\n \\n@@ -79,6 +79,7 @@ def forward(self, x):\\n return self.linear4(self.linear3(self.batchnorm(self.linear2(self.linear1(x)))))\\n \\n \\n+@require_torch_min_version(version=\"1.9.0\")\\n class BigModelingTester(unittest.TestCase):\\n def test_init_empty_weights(self):\\n # base use\\ndiff --git a/tests/test_hooks.py b/tests/test_hooks.py\\nindex 2e0519668..9d48db9e1 100644\\n--- a/tests/test_hooks.py\\n+++ b/tests/test_hooks.py\\n@@ -27,7 +27,7 @@\\n remove_hook_from_module,\\n remove_hook_from_submodules,\\n )\\n-from accelerate.test_utils import require_multi_gpu\\n+from accelerate.test_utils import require_multi_gpu, require_torch_min_version\\n \\n \\n class ModelForTest(nn.Module):\\n@@ -51,6 +51,7 @@ def post_forward(self, module, output):\\n return output + 1\\n \\n \\n+@require_torch_min_version(version=\"1.9.0\")\\n class HooksModelTester(unittest.TestCase):\\n def test_add_and_remove_hooks(self):\\n test_model = ModelForTest()\\ndiff --git a/tests/test_metrics.py b/tests/test_metrics.py\\nindex b26ef00b2..9dd098456 100644\\n--- a/tests/test_metrics.py\\n+++ b/tests/test_metrics.py\\n@@ -26,11 +26,13 @@\\n require_huggingface_suite,\\n require_multi_gpu,\\n require_single_gpu,\\n+ require_torch_min_version,\\n )\\n from accelerate.utils import get_launch_prefix, patch_environment\\n \\n \\n @require_huggingface_suite\\n+@require_torch_min_version(version=\"1.8.0\")\\n class MetricTester(unittest.TestCase):\\n def setUp(self):\\n mod_file = inspect.getfile(accelerate.test_utils)\\ndiff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py\\nindex f5c36ee3d..1c6f6088d 100644\\n--- a/tests/test_modeling_utils.py\\n+++ b/tests/test_modeling_utils.py\\n@@ -21,6 +21,7 @@\\n import torch.nn as nn\\n \\n 
from accelerate.test_utils import require_cuda, require_multi_gpu\\n+from accelerate.test_utils.testing import require_torch_min_version\\n from accelerate.utils.modeling import (\\n check_device_map,\\n clean_device_map,\\n@@ -45,6 +46,7 @@ def forward(self, x):\\n return self.linear2(self.batchnorm(self.linear1(x)))\\n \\n \\n+@require_torch_min_version(version=\"1.9.0\")\\n class ModelingUtilsTester(unittest.TestCase):\\n def check_set_module_tensor_for_device(self, model, device1, device2):\\n self.assertEqual(model.linear1.weight.device, torch.device(device1))\\n',\n", - " 'code_comments': [{'body': 'It should be possible to import those regardless of the version (and then the actual use of it would trigger the error).',\n", - " 'diff_hunk': '@@ -16,7 +15,18 @@\\n InitProcessGroupKwargs,\\n find_executable_batch_size,\\n infer_auto_device_map,\\n+ is_torch_version,\\n load_checkpoint_in_model,\\n rich,\\n synchronize_rng_states,\\n )\\n+\\n+\\n+if is_torch_version(\">=\", \"1.9.0\"):\\n+ from .big_modeling import (\\n+ cpu_offload,\\n+ disk_offload,\\n+ dispatch_model,\\n+ init_empty_weights,\\n+ load_checkpoint_and_dispatch,\\n+ )',\n", - " 'from_author': False},\n", - " {'body': \"Let's leave the type hint but as a forward reference?\",\n", - " 'diff_hunk': '@@ -48,7 +49,7 @@ def save_accelerator_state(\\n optimizers: list,\\n schedulers: list,\\n process_index: int,\\n- scaler: GradScaler = None,\\n+ scaler=None,',\n", - " 'from_author': False},\n", - " {'body': \"```suggestion\\r\\ndef require_torch_min_version(test_case=None, version=None):\\r\\n```\\r\\nNaming isn't completely right IMO.\",\n", - " 'diff_hunk': '@@ -132,6 +133,16 @@ def require_fsdp(test_case):\\n return unittest.skipUnless(is_torch_version(\">=\", \"1.12.0\"), \"test requires torch version >= 1.12.0\")(test_case)\\n \\n \\n+def require_torch_version(test_case=None, version=None):',\n", - " 'from_author': False},\n", - " {'body': \"Not needed since we're raising the min torch\",\n", - " 'diff_hunk': '@@ -48,7 +49,7 @@ def save_accelerator_state(\\n optimizers: list,\\n schedulers: list,\\n process_index: int,\\n- scaler: GradScaler = None,\\n+ scaler=None,',\n", - " 'from_author': True},\n", - " {'body': 'Fixed',\n", - " 'diff_hunk': '@@ -16,7 +15,18 @@\\n InitProcessGroupKwargs,\\n find_executable_batch_size,\\n infer_auto_device_map,\\n+ is_torch_version,\\n load_checkpoint_in_model,\\n rich,\\n synchronize_rng_states,\\n )\\n+\\n+\\n+if is_torch_version(\">=\", \"1.9.0\"):\\n+ from .big_modeling import (\\n+ cpu_offload,\\n+ disk_offload,\\n+ dispatch_model,\\n+ init_empty_weights,\\n+ load_checkpoint_and_dispatch,\\n+ )',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/638'},\n", - " 1026327065: {'diff': 'diff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\\nindex cbbaeae16..5f425f200 100644\\n--- a/src/accelerate/utils/dataclasses.py\\n+++ b/src/accelerate/utils/dataclasses.py\\n@@ -60,6 +60,8 @@ class DistributedDataParallelKwargs(KwargsHandler):\\n \\n `gradient_as_bucket_view` is only available in PyTorch 1.7.0 and later versions.\\n \\n+ `static_graph` is only available in PyTorch 1.11.0 and later versions.\\n+\\n \"\"\"\\n \\n dim: int = 0\\n@@ -68,6 +70,7 @@ class DistributedDataParallelKwargs(KwargsHandler):\\n find_unused_parameters: bool = False\\n check_reduction: bool 
= False\\n gradient_as_bucket_view: bool = False\\n+ static_graph: bool = False\\n \\n \\n @dataclass\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'Thanks! Could we add a warning to this please similar to the other one that exists for these kwargs in the docstring mentioning its only available on pytorch>=1.11?',\n", - " 'from_author': False},\n", - " {'body': 'Sure, done', 'from_author': True},\n", - " {'body': '@rom1504 looks like the last thing we need is to run `make style; make quality` :) ',\n", - " 'from_author': False},\n", - " {'body': '@muellerzr fixed', 'from_author': True},\n", - " {'body': 'Great! Thanks!!', 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/637'},\n", - " 1025316276: {'diff': 'diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex 11aadc8e2..fcc80bb7f 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -26,7 +26,6 @@\\n from typing import Dict, List\\n \\n import torch\\n-import torch.distributed.run as distrib_run\\n \\n import psutil\\n from accelerate.commands.config import default_config_file, load_config_from_file\\n@@ -50,6 +49,10 @@\\n from rich.logging import RichHandler\\n \\n \\n+if is_torch_version(\">=\", \"1.9.0\"):\\n+ import torch.distributed.run as distrib_run\\n+\\n+\\n FORMAT = \"%(message)s\"\\n logging.basicConfig(format=FORMAT, datefmt=\"[%X]\", handlers=[RichHandler()])\\n \\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'Thanks for fixing!', 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/634'},\n", - " 1025122718: {'diff': 'diff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\\nindex 1ba5839e2..65c0e56b2 100644\\n--- a/src/accelerate/commands/config/cluster.py\\n+++ b/src/accelerate/commands/config/cluster.py\\n@@ -37,6 +37,7 @@ def get_cluster_input():\\n num_machines = 1\\n main_process_ip = None\\n main_process_port = None\\n+ rdzv_backend = \"static\"\\n if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_CPU]:\\n num_machines = _ask_field(\\n \"How many different machines will you use (use more than 1 for multi-node training)? [1]: \",\\n@@ -56,6 +57,9 @@ def get_cluster_input():\\n \"What is the port you will use to communicate with the main process? \",\\n lambda x: int(x),\\n )\\n+ rdzv_backend = _ask_field(\\n+ \"What rendezvous backend will you use? 
(\\'static\\', \\'c10d\\', ...)\", default=\"static\"\\n+ )\\n \\n if distributed_type == DistributedType.NO:\\n use_cpu = _ask_field(\\n@@ -323,4 +327,5 @@ def get_cluster_input():\\n deepspeed_config=deepspeed_config,\\n fsdp_config=fsdp_config,\\n use_cpu=use_cpu,\\n+ rdzv_backend=rdzv_backend,\\n )\\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\\nindex 11ca0ca90..e78aad181 100644\\n--- a/src/accelerate/commands/config/config_args.py\\n+++ b/src/accelerate/commands/config/config_args.py\\n@@ -137,6 +137,7 @@ class ClusterConfig(BaseConfig):\\n num_machines: int = 1\\n main_process_ip: Optional[str] = None\\n main_process_port: Optional[int] = None\\n+ rdzv_backend: Optional[str] = \"static\"\\n main_training_function: str = \"main\"\\n \\n # args for deepspeed_plugin\\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex ea27afb75..11aadc8e2 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -26,13 +26,13 @@\\n from typing import Dict, List\\n \\n import torch\\n+import torch.distributed.run as distrib_run\\n \\n import psutil\\n from accelerate.commands.config import default_config_file, load_config_from_file\\n from accelerate.commands.config.config_args import SageMakerConfig\\n from accelerate.state import get_int_from_env\\n from accelerate.utils import (\\n- TORCH_LAUNCH_PARAMS,\\n ComputeEnvironment,\\n DistributedType,\\n PrecisionType,\\n@@ -50,9 +50,6 @@\\n from rich.logging import RichHandler\\n \\n \\n-if is_torch_version(\">=\", \"1.9.0\"):\\n- import torch.distributed.run as distrib_run\\n-\\n FORMAT = \"%(message)s\"\\n logging.basicConfig(format=FORMAT, datefmt=\"[%X]\", handlers=[RichHandler()])\\n \\n@@ -379,8 +376,7 @@ def multi_gpu_launcher(args):\\n setattr(args, \"nproc_per_node\", str(num_processes // num_machines))\\n setattr(args, \"nnodes\", str(num_machines))\\n setattr(args, \"node_rank\", str(args.machine_rank))\\n- setattr(args, \"master_addr\", str(args.main_process_ip))\\n- setattr(args, \"master_port\", str(args.main_process_port))\\n+ setattr(args, \"rdzv_endpoint\", f\"{args.main_process_ip}:{args.main_process_port}\")\\n else:\\n setattr(args, \"nproc_per_node\", str(num_processes))\\n if args.main_process_port is not None:\\n@@ -451,33 +447,19 @@ def multi_gpu_launcher(args):\\n if args.fsdp_state_dict_type is not None:\\n current_env[\"FSDP_STATE_DICT_TYPE\"] = str(args.fsdp_state_dict_type)\\n current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\\n- if is_torch_version(\">=\", \"1.9.0\"):\\n- debug = getattr(args, \"debug\", False)\\n- args = _filter_args(args)\\n- with patch_environment(**current_env):\\n- console = get_console()\\n-\\n- try:\\n- distrib_run.run(args)\\n- except:\\n- if debug:\\n- console.print(\"\\\\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]\")\\n- console.print_exception(suppress=[__file__], show_locals=False)\\n- else:\\n- # We still have to use subprocess, the user won\\'t get a clean traceback as a result\\n- cmd = get_launch_prefix()\\n- for k, v in vars(args).items():\\n- if k in TORCH_LAUNCH_PARAMS and v:\\n- param = [f\"--{k}\"]\\n- if type(v) != bool:\\n- param.append(v)\\n- cmd.extend(param)\\n- cmd.append(args.training_script)\\n- cmd.extend(args.training_script_args)\\n- process = subprocess.Popen(cmd, env=current_env)\\n- process.wait()\\n- if process.returncode != 0:\\n- raise 
subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\\n+ if is_torch_version(\"<\", \"1.9.0\"):\\n+ raise NotImplementedError(\"Multi-node training requires pytorch>=1.9.0\")\\n+\\n+ debug = getattr(args, \"debug\", False)\\n+ args = _filter_args(args)\\n+ with patch_environment(**current_env):\\n+ try:\\n+ distrib_run.run(args)\\n+ except:\\n+ if debug:\\n+ console = get_console()\\n+ console.print(\"\\\\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]\")\\n+ console.print_exception(suppress=[__file__], show_locals=False)\\n \\n \\n def deepspeed_launcher(args):\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/631'},\n", - " 1024597998: {'diff': 'diff --git a/examples/complete_cv_example.py b/examples/complete_cv_example.py\\nindex 99d109bd1..1b86e66ee 100644\\n--- a/examples/complete_cv_example.py\\n+++ b/examples/complete_cv_example.py\\n@@ -105,8 +105,6 @@ def training_function(config, args):\\n # We need to initialize the trackers we use, and also store our configuration\\n if args.with_tracking and accelerator.is_main_process:\\n run = os.path.split(__file__)[-1].split(\".\")[0]\\n- if args.logging_dir:\\n- run = os.path.join(args.logging_dir, run)\\n accelerator.init_trackers(run, config)\\n \\n # Grab all the image filenames\\ndiff --git a/examples/complete_nlp_example.py b/examples/complete_nlp_example.py\\nindex 094f6cfb5..93e6764f6 100644\\n--- a/examples/complete_nlp_example.py\\n+++ b/examples/complete_nlp_example.py\\n@@ -77,8 +77,6 @@ def training_function(config, args):\\n # We need to initialize the trackers we use, and also store our configuration\\n if args.with_tracking and accelerator.is_main_process:\\n run = os.path.split(__file__)[-1].split(\".\")[0]\\n- if args.logging_dir:\\n- run = os.path.join(args.logging_dir, run)\\n accelerator.init_trackers(run, config)\\n \\n tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\\nindex 4e0900eb4..051cf8bc6 100644\\n--- a/src/accelerate/state.py\\n+++ b/src/accelerate/state.py\\n@@ -251,7 +251,7 @@ def __repr__(self):\\n if self.distributed_type == DistributedType.DEEPSPEED:\\n repr += f\"ds_config: {self.deepspeed_plugin.deepspeed_config}\\\\n\"\\n else:\\n- f\"Mixed precision type: {mixed_precision}\\\\n\"\\n+ repr += f\"Mixed precision type: {mixed_precision}\\\\n\"\\n return repr\\n \\n # For backward compatibility\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/630'},\n", - " 1024594829: {'diff': 'diff --git a/docs/source/usage_guides/mps.mdx b/docs/source/usage_guides/mps.mdx\\nindex 43a00f104..3648bdce8 100644\\n--- a/docs/source/usage_guides/mps.mdx\\n+++ b/docs/source/usage_guides/mps.mdx\\n@@ -72,8 +72,9 @@ accelerate launch /examples/cv_example.py --data_dir images\\n \\n ## A few caveats to be aware of\\n \\n-1. For `nlp_example.py` the metrics are too bad when compared to CPU-only training. \\n-This means certain operations in BERT model are going wrong using mps device and this needs to be fixed by PyTorch.\\n+1. 
We strongly recommend to install PyTorch >= 1.13 (nightly version at the time of writing) on your MacOS machine. \\n+It has major fixes related to model correctness and performance improvements for transformer based models.\\n+Please refer to https://github.com/pytorch/pytorch/issues/82707 for more details.\\n 2. Distributed setups `gloo` and `nccl` are not working with `mps` device. \\n This means that currently only single GPU of `mps` device type can be used.\\n \\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/629'},\n", - " 1024080683: {'diff': 'diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex 7cb4529d9..541bda48e 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -367,7 +367,7 @@ def multi_gpu_launcher(args):\\n if num_machines > 1:\\n setattr(args, \"nproc_per_node\", str(num_processes // num_machines))\\n setattr(args, \"nnodes\", str(num_machines))\\n- setattr(args, \"machine_rank\", str(args.machine_rank))\\n+ setattr(args, \"node_rank\", str(args.machine_rank))\\n setattr(args, \"master_addr\", str(args.main_process_ip))\\n setattr(args, \"master_port\", str(args.main_process_port))\\n else:\\n@@ -441,16 +441,16 @@ def multi_gpu_launcher(args):\\n current_env[\"FSDP_STATE_DICT_TYPE\"] = str(args.fsdp_state_dict_type)\\n current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\\n if is_torch_version(\">=\", \"1.9.0\"):\\n- distrib_args = _filter_args(args)\\n+ args = _filter_args(args)\\n with patch_environment(**current_env):\\n- distrib_run.run(distrib_args)\\n+ distrib_run.run(args)\\n else:\\n # We still have to use subprocess, the user won\\'t get a clean traceback as a result\\n cmd = get_launch_prefix()\\n for k, v in vars(args).items():\\n if k in TORCH_LAUNCH_PARAMS and v:\\n param = [f\"--{k}\"]\\n- if not v:\\n+ if type(v) != bool:\\n param.append(v)\\n cmd.extend(param)\\n cmd.append(args.training_script)\\ndiff --git a/src/accelerate/utils/launch.py b/src/accelerate/utils/launch.py\\nindex c6605ee26..1a452ca63 100644\\n--- a/src/accelerate/utils/launch.py\\n+++ b/src/accelerate/utils/launch.py\\n@@ -46,16 +46,12 @@ def _filter_args(args):\\n Filters out all `accelerate` specific args\\n \"\"\"\\n distrib_args = distrib_run.get_args_parser()\\n- known_args, _ = distrib_args.parse_known_args()\\n- for arg in list(vars(args).keys()):\\n- if arg not in vars(known_args).keys():\\n- delattr(args, arg)\\n- distrib_args = distrib_run.parse_args(vars(args))\\n+ new_args, _ = distrib_args.parse_known_args()\\n+\\n for key, value in vars(args).items():\\n- setattr(distrib_args, key, value)\\n- if is_torch_version(\"<\", \"1.9.0\"):\\n- setattr(distrib_args, \"use_env\", True)\\n- return distrib_args\\n+ if key in vars(new_args).keys():\\n+ setattr(new_args, key, value)\\n+ return new_args\\n \\n \\n class PrepareForLaunch:\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/627'},\n", - " 1023919762: {'diff': 'diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex c7f235843..8196413c5 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ 
b/src/accelerate/commands/launch.py\\n@@ -37,7 +37,6 @@\\n PrecisionType,\\n PrepareForLaunch,\\n _filter_args,\\n- get_launch_prefix,\\n is_deepspeed_available,\\n is_rich_available,\\n is_sagemaker_available,\\n@@ -477,55 +476,58 @@ def multi_gpu_launcher(args):\\n def deepspeed_launcher(args):\\n if not is_deepspeed_available():\\n raise ImportError(\"DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.\")\\n- cmd = [\"deepspeed\", \"--no_local_rank\"]\\n- if args.num_machines > 1:\\n- if args.deepspeed_multinode_launcher == DEEPSPEED_MULTINODE_LAUNCHERS[1]:\\n- cmd = get_launch_prefix()\\n+ num_processes = getattr(args, \"num_processes\")\\n+ num_machines = getattr(args, \"num_machines\")\\n+ main_process_ip = getattr(args, \"main_process_ip\")\\n+ main_process_port = getattr(args, \"main_process_port\")\\n+ if num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:\\n+ cmd = [\"deepspeed\", \"--no_local_rank\"]\\n+ cmd.extend([\"--hostfile\", str(args.deepspeed_hostfile), \"--launcher\", str(args.deepspeed_multinode_launcher)])\\n+ if args.deepspeed_exclusion_filter is not None:\\n cmd.extend(\\n [\\n- \"--nproc_per_node\",\\n- str(args.num_processes // args.num_machines),\\n- \"--nnodes\",\\n- str(args.num_machines),\\n- \"--node_rank\",\\n- str(args.machine_rank),\\n- \"--master_addr\",\\n- args.main_process_ip,\\n- \"--master_port\",\\n- str(args.main_process_port),\\n+ \"--exclude\",\\n+ str(args.deepspeed_exclusion_filter),\\n ]\\n )\\n- else:\\n+ elif args.deepspeed_inclusion_filter is not None:\\n cmd.extend(\\n- [\"--hostfile\", str(args.deepspeed_hostfile), \"--launcher\", str(args.deepspeed_multinode_launcher)]\\n+ [\\n+ \"--include\",\\n+ str(args.deepspeed_inclusion_filter),\\n+ ]\\n )\\n- if args.deepspeed_exclusion_filter is not None:\\n- cmd.extend(\\n- [\\n- \"--exclude\",\\n- str(args.deepspeed_exclusion_filter),\\n- ]\\n- )\\n- elif args.deepspeed_inclusion_filter is not None:\\n- cmd.extend(\\n- [\\n- \"--include\",\\n- str(args.deepspeed_inclusion_filter),\\n- ]\\n- )\\n- else:\\n- cmd.extend([\"--num_gpus\", str(args.num_processes // args.num_machines)])\\n+ else:\\n+ cmd.extend([\"--num_gpus\", str(args.num_processes // args.num_machines)])\\n+\\n+ if args.module and args.no_python:\\n+ raise ValueError(\"--module and --no_python cannot be used together\")\\n+ elif args.module:\\n+ cmd.append(\"--module\")\\n+ elif args.no_python:\\n+ cmd.append(\"--no_python\")\\n+ cmd.append(args.training_script)\\n+ cmd.extend(args.training_script_args)\\n+ elif num_machines > 1 and args.deepspeed_multinode_launcher == DEEPSPEED_MULTINODE_LAUNCHERS[1]:\\n+ setattr(args, \"nproc_per_node\", str(num_processes // num_machines))\\n+ setattr(args, \"nnodes\", str(num_machines))\\n+ setattr(args, \"node_rank\", int(args.machine_rank))\\n+ if getattr(args, \"same_network\"):\\n+ setattr(args, \"master_addr\", str(main_process_ip))\\n+ setattr(args, \"master_port\", str(main_process_port))\\n+ else:\\n+ setattr(args, \"rdzv_endpoint\", f\"{main_process_ip}:{main_process_port}\")\\n else:\\n- cmd.extend([\"--num_gpus\", str(args.num_processes)])\\n+ setattr(args, \"nproc_per_node\", str(num_processes))\\n+ if main_process_port is not None:\\n+ setattr(args, \"master_port\", str(main_process_port))\\n \\n if args.module and args.no_python:\\n raise ValueError(\"--module and --no_python cannot be used together\")\\n elif args.module:\\n- cmd.append(\"--module\")\\n+ setattr(args, \"module\", True)\\n elif 
args.no_python:\\n- cmd.append(\"--no_python\")\\n- cmd.append(args.training_script)\\n- cmd.extend(args.training_script_args)\\n+ setattr(args, \"no_python\", True)\\n \\n current_env = os.environ.copy()\\n try:\\n@@ -558,10 +560,24 @@ def deepspeed_launcher(args):\\n continue\\n f.write(f\"{key}={value}\\\\n\")\\n \\n- process = subprocess.Popen(cmd, env=current_env)\\n- process.wait()\\n- if process.returncode != 0:\\n- raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\\n+ process = subprocess.Popen(cmd, env=current_env)\\n+ process.wait()\\n+ if process.returncode != 0:\\n+ raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\\n+ else:\\n+ if is_torch_version(\"<\", \"1.9.0\"):\\n+ raise NotImplementedError(\"Multi-node training requires pytorch>=1.9.0\")\\n+\\n+ debug = getattr(args, \"debug\", False)\\n+ args = _filter_args(args)\\n+ with patch_environment(**current_env):\\n+ try:\\n+ distrib_run.run(args)\\n+ except:\\n+ if debug:\\n+ console = get_console()\\n+ console.print(\"\\\\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]\")\\n+ console.print_exception(suppress=[__file__], show_locals=False)\\n \\n \\n def tpu_launcher(args):\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': '> There is a bit too much in this PR to wrap my head around. Can we split it between multiGPU launcher fixes, DeepSpeed launcher fixes and other fixes? Thanks!\\r\\n\\r\\n1. MultiGPU launcher fixes and simplification was put in another PR by Zach #627 \\r\\n2. Minor other fixes are in #630 \\r\\n3. This will make deepspeed launcher updates to remove a call to subprocess',\n", - " 'from_author': True}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/626'},\n", - " 1023444774: {'diff': 'diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex 7cb4529d9..117932f2b 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -334,6 +334,8 @@ def simple_launcher(args):\\n current_env = os.environ.copy()\\n current_env[\"USE_CPU\"] = str(args.cpu or args.use_cpu)\\n current_env[\"USE_MPS_DEVICE\"] = str(args.use_mps_device)\\n+ if args.use_mps_device:\\n+ current_env[\"PYTORCH_ENABLE_MPS_FALLBACK\"] = \"1\"\\n if args.num_machines > 1:\\n current_env[\"MASTER_ADDR\"] = args.main_process_ip\\n current_env[\"MASTER_PORT\"] = str(args.main_process_port)\\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\\nindex bd339048f..4e0900eb4 100644\\n--- a/src/accelerate/state.py\\n+++ b/src/accelerate/state.py\\n@@ -13,6 +13,7 @@\\n # limitations under the License.\\n \\n import os\\n+import warnings\\n from distutils.util import strtobool\\n \\n import torch\\n@@ -221,6 +222,14 @@ def __init__(\\n \"and/or you do not have an MPS-enabled device on this machine.\"\\n )\\n else:\\n+ from .utils import is_torch_version\\n+\\n+ if not is_torch_version(\">\", \"1.12.0\"):\\n+ warnings.warn(\\n+ \"We strongly recommend to install PyTorch >= 1.13 (nightly version at the time of writing) on your MacOS machine. \"\\n+ \"It has major fixes related to model correctness and performance improvements for transformer based models. 
\"\\n+ \"Please refer to https://github.com/pytorch/pytorch/issues/82707 for more details.\"\\n+ )\\n self.device = torch.device(\"mps\")\\n elif cpu or not torch.cuda.is_available():\\n self.device = torch.device(\"cpu\")\\n',\n", - " 'code_comments': [{'body': 'That warning may not age well once PyTorch 1.13 is released. Maybe:\\r\\n```suggestion\\r\\n \"We strongly recommend to install PyTorch >= 1.13 (nightly version at the time of writing) on your MacOS machine. \"\\r\\n```',\n", - " 'diff_hunk': '@@ -221,6 +222,14 @@ def __init__(\\n \"and/or you do not have an MPS-enabled device on this machine.\"\\n )\\n else:\\n+ from .utils import is_torch_version\\n+\\n+ if not is_torch_version(\">\", \"1.12.0\"):\\n+ warnings.warn(\\n+ \"We strongly recommend to install the latest PyTorch nightly version (1.13.0.dev...) on your MacOS machine. \"',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/625'},\n", - " 1022069195: {'diff': 'diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex 8e372adea..7cb4529d9 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -32,19 +32,26 @@\\n from accelerate.commands.config.config_args import SageMakerConfig\\n from accelerate.state import get_int_from_env\\n from accelerate.utils import (\\n+ TORCH_LAUNCH_PARAMS,\\n ComputeEnvironment,\\n DistributedType,\\n PrecisionType,\\n PrepareForLaunch,\\n+ _filter_args,\\n get_launch_prefix,\\n is_deepspeed_available,\\n is_sagemaker_available,\\n+ is_torch_version,\\n patch_environment,\\n )\\n from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS\\n from accelerate.utils.dataclasses import SageMakerDistributedType\\n \\n \\n+if is_torch_version(\">=\", \"1.9.0\"):\\n+ import torch.distributed.run as distrib_run\\n+\\n+\\n logger = logging.getLogger(__name__)\\n \\n \\n@@ -355,43 +362,32 @@ def simple_launcher(args):\\n \\n \\n def multi_gpu_launcher(args):\\n- cmd = get_launch_prefix()\\n- if args.num_machines > 1:\\n- cmd.extend(\\n- [\\n- \"--nproc_per_node\",\\n- str(args.num_processes // args.num_machines),\\n- \"--nnodes\",\\n- str(args.num_machines),\\n- \"--node_rank\",\\n- str(args.machine_rank),\\n- \"--master_addr\",\\n- args.main_process_ip,\\n- \"--master_port\",\\n- str(args.main_process_port),\\n- ]\\n- )\\n+ num_processes = getattr(args, \"num_processes\")\\n+ num_machines = getattr(args, \"num_machines\")\\n+ if num_machines > 1:\\n+ setattr(args, \"nproc_per_node\", str(num_processes // num_machines))\\n+ setattr(args, \"nnodes\", str(num_machines))\\n+ setattr(args, \"machine_rank\", str(args.machine_rank))\\n+ setattr(args, \"master_addr\", str(args.main_process_ip))\\n+ setattr(args, \"master_port\", str(args.main_process_port))\\n else:\\n- cmd.extend([\"--nproc_per_node\", str(args.num_processes)])\\n+ setattr(args, \"nproc_per_node\", str(num_processes))\\n if args.main_process_port is not None:\\n- cmd.extend([\"--master_port\", str(args.main_process_port)])\\n+ setattr(args, \"master_port\", str(args.main_process_port))\\n \\n if args.module and args.no_python:\\n raise ValueError(\"--module and --no_python cannot be used together\")\\n elif args.module:\\n- cmd.append(\"--module\")\\n+ setattr(args, \"module\", True)\\n elif args.no_python:\\n- cmd.append(\"--no_python\")\\n- 
cmd.append(args.training_script)\\n- cmd.extend(args.training_script_args)\\n+ setattr(args, \"no_python\", True)\\n \\n current_env = os.environ.copy()\\n+ mixed_precision = args.mixed_precision.lower()\\n try:\\n- mixed_precision = PrecisionType(args.mixed_precision.lower())\\n+ mixed_precision = PrecisionType(mixed_precision)\\n except ValueError:\\n- raise ValueError(\\n- f\"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.\"\\n- )\\n+ raise ValueError(f\"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.\")\\n \\n if args.fp16:\\n warnings.warn(\\'--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.\\', DeprecationWarning)\\n@@ -444,10 +440,25 @@ def multi_gpu_launcher(args):\\n if args.fsdp_state_dict_type is not None:\\n current_env[\"FSDP_STATE_DICT_TYPE\"] = str(args.fsdp_state_dict_type)\\n current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\\n- process = subprocess.Popen(cmd, env=current_env)\\n- process.wait()\\n- if process.returncode != 0:\\n- raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\\n+ if is_torch_version(\">=\", \"1.9.0\"):\\n+ distrib_args = _filter_args(args)\\n+ with patch_environment(**current_env):\\n+ distrib_run.run(distrib_args)\\n+ else:\\n+ # We still have to use subprocess, the user won\\'t get a clean traceback as a result\\n+ cmd = get_launch_prefix()\\n+ for k, v in vars(args).items():\\n+ if k in TORCH_LAUNCH_PARAMS and v:\\n+ param = [f\"--{k}\"]\\n+ if not v:\\n+ param.append(v)\\n+ cmd.extend(param)\\n+ cmd.append(args.training_script)\\n+ cmd.extend(args.training_script_args)\\n+ process = subprocess.Popen(cmd, env=current_env)\\n+ process.wait()\\n+ if process.returncode != 0:\\n+ raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\\n \\n \\n def deepspeed_launcher(args):\\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\\nindex db21afc3a..2b8f566f0 100644\\n--- a/src/accelerate/utils/__init__.py\\n+++ b/src/accelerate/utils/__init__.py\\n@@ -2,7 +2,7 @@\\n # There\\'s no way to ignore \"F401 \\'...\\' imported but unused\" warnings in this\\n # module, but to preserve other warnings. 
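The `multi_gpu_launcher` hunk above drops the `subprocess` call on torch >= 1.9 and instead hands a filtered argument namespace to `torch.distributed.run` in-process, so failures surface as normal Python tracebacks. A minimal, hedged sketch of that handoff (the wrapper is illustrative, not the Accelerate code path):

```python
import torch.distributed.run as distrib_run  # requires torch >= 1.9

def launch_with_torchrun(training_script, script_args, nproc_per_node=1):
    # Build a namespace torchrun understands, then run it in the current process.
    parser = distrib_run.get_args_parser()
    args = parser.parse_args(
        ["--nproc_per_node", str(nproc_per_node), training_script, *script_args]
    )
    distrib_run.run(args)

# e.g. launch_with_torchrun("train.py", ["--epochs", "1"], nproc_per_node=2)
```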
So, don\\'t check this module at all\\n \\n-from .constants import MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SCALER_NAME, SCHEDULER_NAME\\n+from .constants import MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS\\n from .dataclasses import (\\n ComputeEnvironment,\\n DeepSpeedPlugin,\\n@@ -91,7 +91,7 @@\\n HfDeepSpeedConfig,\\n )\\n \\n-from .launch import PrepareForLaunch, get_launch_prefix\\n+from .launch import PrepareForLaunch, _filter_args, get_launch_prefix\\n from .memory import find_executable_batch_size\\n from .other import (\\n extract_model_from_parallel,\\ndiff --git a/src/accelerate/utils/constants.py b/src/accelerate/utils/constants.py\\nindex 4e7c71853..934923bef 100644\\n--- a/src/accelerate/utils/constants.py\\n+++ b/src/accelerate/utils/constants.py\\n@@ -31,3 +31,30 @@\\n DEEPSPEED_MULTINODE_LAUNCHERS = [\"pdsh\", \"standard\", \"openmpi\", \"mvapich\"]\\n \\n STR_OPERATION_TO_FUNC = {\">\": op.gt, \">=\": op.ge, \"==\": op.eq, \"!=\": op.ne, \"<=\": op.le, \"<\": op.lt}\\n+\\n+# These are the args for `torch.distributed.launch` for pytorch < 1.9\\n+TORCH_LAUNCH_PARAMS = [\\n+ \"nnodes\",\\n+ \"nproc_per_node\",\\n+ \"rdzv_backend\",\\n+ \"rdzv_endpoint\",\\n+ \"rdzv_id\",\\n+ \"rdzv_conf\",\\n+ \"standalone\",\\n+ \"max_restarts\",\\n+ \"monitor_interval\",\\n+ \"start_method\",\\n+ \"role\",\\n+ \"module\",\\n+ \"m\",\\n+ \"no_python\",\\n+ \"run_path\",\\n+ \"log_dir\",\\n+ \"r\",\\n+ \"redirects\",\\n+ \"t\",\\n+ \"tee\",\\n+ \"node_rank\",\\n+ \"master_addr\",\\n+ \"master_port\",\\n+]\\ndiff --git a/src/accelerate/utils/launch.py b/src/accelerate/utils/launch.py\\nindex c74185c5d..c6605ee26 100644\\n--- a/src/accelerate/utils/launch.py\\n+++ b/src/accelerate/utils/launch.py\\n@@ -21,6 +21,12 @@\\n from .dataclasses import DistributedType\\n \\n \\n+if is_torch_version(\">=\", \"1.9.0\"):\\n+ import torch.distributed.run as distrib_run\\n+else:\\n+ import torch.distributed.launch as distrib_run\\n+\\n+\\n def get_launch_prefix():\\n \"\"\"\\n Grabs the correct launcher for starting a distributed command, such as either `torchrun`, `python -m\\n@@ -35,6 +41,23 @@ def get_launch_prefix():\\n return cmd\\n \\n \\n+def _filter_args(args):\\n+ \"\"\"\\n+ Filters out all `accelerate` specific args\\n+ \"\"\"\\n+ distrib_args = distrib_run.get_args_parser()\\n+ known_args, _ = distrib_args.parse_known_args()\\n+ for arg in list(vars(args).keys()):\\n+ if arg not in vars(known_args).keys():\\n+ delattr(args, arg)\\n+ distrib_args = distrib_run.parse_args(vars(args))\\n+ for key, value in vars(args).items():\\n+ setattr(distrib_args, key, value)\\n+ if is_torch_version(\"<\", \"1.9.0\"):\\n+ setattr(distrib_args, \"use_env\", True)\\n+ return distrib_args\\n+\\n+\\n class PrepareForLaunch:\\n \"\"\"\\n Prepare a function that will launched in a distributed setup.\\n',\n", - " 'code_comments': [{'body': 'Clean!',\n", - " 'diff_hunk': '@@ -35,6 +36,21 @@ def get_launch_prefix():\\n return cmd\\n \\n \\n+def _filter_args(args):',\n", - " 'from_author': False},\n", - " {'body': \"Please use a regular list here, it's going to be easier to maintain (in terms of diff)\",\n", - " 'diff_hunk': '@@ -31,3 +31,8 @@\\n DEEPSPEED_MULTINODE_LAUNCHERS = [\"pdsh\", \"standard\", \"openmpi\", \"mvapich\"]\\n \\n STR_OPERATION_TO_FUNC = {\">\": op.gt, \">=\": op.ge, \"==\": op.eq, \"!=\": op.ne, \"<=\": op.le, \"<\": op.lt}\\n+\\n+# These are the args for `torch.distributed.launch` for pytorch < 1.9\\n+TORCH_LAUNCH_PARAMS = 
\"\"\"nnodes,nproc_per_node,rdzv_backend,rdzv_endpoint,rdzv_id,rdzv_conf,standalone,max_restarts,monitor_interval,start_method,role,module,m,no_python,run_path,log_dir,r,redirects,t,tee,node_rank,master_addr,master_port\"\"\".split(\\n+ \",\"\\n+)',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/623'},\n", - " 1021867447: {'diff': 'diff --git a/examples/README.md b/examples/README.md\\nindex 150afb75c..c4f050b3f 100644\\n--- a/examples/README.md\\n+++ b/examples/README.md\\n@@ -136,7 +136,7 @@ To run it in each of these various modes, use the following commands:\\n ```\\n - single GPU:\\n ```bash\\n- python ./nlp_example.py # from a server with a GPU\\n+ python ./cv_example.py # from a server with a GPU\\n ```\\n - with fp16 (mixed-precision)\\n * from any server by passing `fp16=True` to the `Accelerator`.\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': 'Thanks for the fix!', 'from_author': False},\n", - " {'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/622'},\n", - " 1021713865: {'diff': 'diff --git a/docs/source/usage_guides/gradient_accumulation.mdx b/docs/source/usage_guides/gradient_accumulation.mdx\\nindex f9f5dc62a..bd06efed9 100644\\n--- a/docs/source/usage_guides/gradient_accumulation.mdx\\n+++ b/docs/source/usage_guides/gradient_accumulation.mdx\\n@@ -31,7 +31,6 @@ model.to(device)\\n gradient_accumulation_steps = 2\\n \\n for index, batch in enumerate(training_dataloader):\\n- optimizer.zero_grad()\\n inputs, targets = batch\\n inputs = inputs.to(device)\\n targets = targets.to(device)\\n@@ -42,6 +41,7 @@ for index, batch in enumerate(training_dataloader):\\n if (index + 1) % gradient_accumulation_steps == 0:\\n optimizer.step()\\n scheduler.step()\\n+ optimizer.zero_grad()\\n ```\\n \\n ## Converting it to 🤗 Accelerate\\n@@ -57,7 +57,6 @@ First the code shown earlier will be converted to utilize 🤗 Accelerate withou\\n + )\\n \\n for index, batch in enumerate(training_dataloader):\\n- optimizer.zero_grad()\\n inputs, targets = batch\\n - inputs = inputs.to(device)\\n - targets = targets.to(device)\\n@@ -68,6 +67,7 @@ First the code shown earlier will be converted to utilize 🤗 Accelerate withou\\n if (index+1) % gradient_accumulation_steps == 0:\\n optimizer.step()\\n scheduler.step()\\n+ optimizer.zero_grad()\\n ```\\n \\n \\n@@ -94,7 +94,6 @@ You just wrap it around the entire training part of our code:\\n - for index, batch in enumerate(training_dataloader):\\n + for batch in training_dataloader:\\n + with accelerator.accumulate(model):\\n- optimizer.zero_grad()\\n inputs, targets = batch\\n outputs = model(inputs)\\n ```\\n@@ -107,6 +106,7 @@ You can remove all the special checks for the step number and the loss adjustmen\\n - if (index+1) % gradient_accumulation_steps == 0:\\n optimizer.step()\\n scheduler.step()\\n+ optimizer.zero_grad()\\n ```\\n \\n As you can see the [`Accelerator`] is able to keep track of the batch number you are on and it will automatically know whether to step through the prepared optimizer and how to adjust the loss. 
\\n@@ -118,11 +118,11 @@ Below is the finished implementation for performing gradient accumulation with \\n ```python\\n for batch in training_dataloader:\\n with accelerator.accumulate(model):\\n- optimizer.zero_grad()\\n inputs, targets = batch\\n outputs = model(inputs)\\n loss = loss_function(outputs, targets)\\n accelerator.backward(loss)\\n optimizer.step()\\n scheduler.step()\\n+ optimizer.zero_grad()\\n ```\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/620'},\n", - " 1021681480: {'diff': 'diff --git a/examples/README.md b/examples/README.md\\nindex 271ffd3c5..150afb75c 100644\\n--- a/examples/README.md\\n+++ b/examples/README.md\\n@@ -184,6 +184,10 @@ To run it in each of these various modes, use the following commands:\\n * In PyTorch:\\n Add an `xmp.spawn` line in your script as you usually do.\\n \\n+### Simple vision example (GANs)\\n+\\n+- [huggan project](https://github.com/huggingface/community-events/tree/main/huggan)\\n+ \\n ## Finer Examples\\n \\n While the first two scripts are extremely barebones when it comes to what you can do with accelerate, more advanced features are documented in two other locations.\\n',\n", - " 'code_comments': [{'body': '```suggestion\\r\\n- [huggan project](https://github.com/huggingface/community-events/tree/main/huggan)\\r\\n```',\n", - " 'diff_hunk': '@@ -184,6 +184,10 @@ To run it in each of these various modes, use the following commands:\\n * In PyTorch:\\n Add an `xmp.spawn` line in your script as you usually do.\\n \\n+### Simple vision example (GANs)\\n+\\n+- Please see: https://github.com/huggingface/community-events/tree/main/huggan',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/619'},\n", - " 1020716188: {'diff': 'diff --git a/setup.py b/setup.py\\nindex 8247aa5fe..4bc977029 100644\\n--- a/setup.py\\n+++ b/setup.py\\n@@ -50,7 +50,7 @@\\n ]\\n },\\n python_requires=\">=3.7.0\",\\n- install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"psutil\", \"pyyaml\", \"torch>=1.4.0\"],\\n+ install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"psutil\", \"pyyaml\", \"torch>=1.4.0\", \"rich\"],\\n extras_require=extras,\\n classifiers=[\\n \"Development Status :: 5 - Production/Stable\",\\ndiff --git a/src/accelerate/__init__.py b/src/accelerate/__init__.py\\nindex 145b0aa27..03a7198ac 100644\\n--- a/src/accelerate/__init__.py\\n+++ b/src/accelerate/__init__.py\\n@@ -17,5 +17,6 @@\\n find_executable_batch_size,\\n infer_auto_device_map,\\n load_checkpoint_in_model,\\n+ rich,\\n synchronize_rng_states,\\n )\\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex fcb26228a..ea27afb75 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -46,11 +46,15 @@\\n )\\n from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS\\n from accelerate.utils.dataclasses import SageMakerDistributedType\\n+from rich import get_console\\n+from rich.logging import RichHandler\\n \\n \\n if is_torch_version(\">=\", \"1.9.0\"):\\n import torch.distributed.run as distrib_run\\n \\n+FORMAT = \"%(message)s\"\\n+logging.basicConfig(format=FORMAT, datefmt=\"[%X]\", 
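The gradient-accumulation hunks above move `optimizer.zero_grad()` to after `optimizer.step()`/`scheduler.step()`, so gradients keep accumulating across the intermediate batches. Below is a hedged, self-contained toy version of the finished loop with a dummy model and dataset so it can run on CPU; everything outside the loop is invented for the example.

```python
import torch
from accelerate import Accelerator
from torch.utils.data import DataLoader, TensorDataset

# Toy setup so the loop is runnable; replace with your real objects.
model = torch.nn.Linear(10, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
training_dataloader = DataLoader(TensorDataset(torch.randn(64, 10), torch.randn(64, 1)), batch_size=8)
loss_function = torch.nn.MSELoss()

accelerator = Accelerator(gradient_accumulation_steps=2)
model, optimizer, training_dataloader, scheduler = accelerator.prepare(
    model, optimizer, training_dataloader, scheduler
)

for batch in training_dataloader:
    with accelerator.accumulate(model):
        inputs, targets = batch
        outputs = model(inputs)
        loss = loss_function(outputs, targets)
        accelerator.backward(loss)
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()  # zeroed only after stepping, per the fix above
```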
handlers=[RichHandler()])\\n \\n logger = logging.getLogger(__name__)\\n \\n@@ -301,7 +305,12 @@ def launch_command_parser(subparsers=None):\\n \"--aws_secret_access_key\",\\n type=str,\\n default=None,\\n- help=\"The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job\",\\n+ help=\"The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job.\",\\n+ )\\n+ parser.add_argument(\\n+ \"--debug\",\\n+ action=\"store_true\",\\n+ help=\"Whether to print out the torch.distributed stack trace when something fails.\",\\n )\\n parser.add_argument(\\n \"training_script\",\\n@@ -443,9 +452,17 @@ def multi_gpu_launcher(args):\\n current_env[\"FSDP_STATE_DICT_TYPE\"] = str(args.fsdp_state_dict_type)\\n current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\\n if is_torch_version(\">=\", \"1.9.0\"):\\n+ debug = getattr(args, \"debug\", False)\\n args = _filter_args(args)\\n with patch_environment(**current_env):\\n- distrib_run.run(args)\\n+ console = get_console()\\n+\\n+ try:\\n+ distrib_run.run(args)\\n+ except:\\n+ if debug:\\n+ console.print(\"\\\\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]\")\\n+ console.print_exception(suppress=[__file__], show_locals=False)\\n else:\\n # We still have to use subprocess, the user won\\'t get a clean traceback as a result\\n cmd = get_launch_prefix()\\ndiff --git a/src/accelerate/utils/launch.py b/src/accelerate/utils/launch.py\\nindex 1a452ca63..31d71ad21 100644\\n--- a/src/accelerate/utils/launch.py\\n+++ b/src/accelerate/utils/launch.py\\n@@ -23,8 +23,6 @@\\n \\n if is_torch_version(\">=\", \"1.9.0\"):\\n import torch.distributed.run as distrib_run\\n-else:\\n- import torch.distributed.launch as distrib_run\\n \\n \\n def get_launch_prefix():\\ndiff --git a/src/accelerate/utils/rich.py b/src/accelerate/utils/rich.py\\nnew file mode 100644\\nindex 000000000..7a54c8b5a\\n--- /dev/null\\n+++ b/src/accelerate/utils/rich.py\\n@@ -0,0 +1,18 @@\\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+from rich.traceback import install\\n+\\n+\\n+install(show_locals=False)\\n',\n", - " 'code_comments': [{'body': 'I think this is a leftover of your tests?',\n", - " 'diff_hunk': '@@ -97,6 +98,8 @@ def collate_fn(examples):\\n def training_function(config, args):\\n # Initialize accelerator\\n accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\\n+ if accelerator.process_index == 1:\\n+ raise ValueError()',\n", - " 'from_author': False},\n", - " {'body': 'You put `rich` in the core install here but use an `is_rich_available` later on.',\n", - " 'diff_hunk': '@@ -50,7 +50,7 @@\\n ]\\n },\\n python_requires=\">=3.7.0\",\\n- install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"psutil\", \"pyyaml\", \"torch>=1.4.0\"],\\n+ install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"psutil\", \"pyyaml\", \"torch>=1.4.0\", \"rich\"],',\n", - " 'from_author': False},\n", - " {'body': \"Should be its own list in a constant probably, so it's easier to read/add/see diff\",\n", - " 'diff_hunk': '@@ -444,10 +441,29 @@ def multi_gpu_launcher(args):\\n if args.fsdp_state_dict_type is not None:\\n current_env[\"FSDP_STATE_DICT_TYPE\"] = str(args.fsdp_state_dict_type)\\n current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\\n- process = subprocess.Popen(cmd, env=current_env)\\n- process.wait()\\n- if process.returncode != 0:\\n- raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\\n+ # use torchrun directly here and try to call it\\n+\\n+ with patch_environment(**current_env):\\n+ import torch.distributed.run as distrib_run\\n+ debug = getattr(args, \"debug\", False)\\n+ keys = \"\"\"nnodes,nproc_per_node,rdzv_backend,rdzv_endpoint,rdzv_id,rdzv_conf,standalone,max_restarts,monitor_interval,start_method,role,module,m,no_python,run_path,log_dir,r,redirects,t,tee,node_rank,master_addr,master_port,training_script,training_script_args\"\"\".split(\",\")',\n", - " 'from_author': False},\n", - " {'body': 'Thanks, fixed this now! 🤗 ',\n", - " 'diff_hunk': '@@ -97,6 +98,8 @@ def collate_fn(examples):\\n def training_function(config, args):\\n # Initialize accelerator\\n accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\\n+ if accelerator.process_index == 1:\\n+ raise ValueError()',\n", - " 'from_author': True},\n", - " {'body': 'As discussed offline, opted to make rich a true dep in Accelerate',\n", - " 'diff_hunk': '@@ -50,7 +50,7 @@\\n ]\\n },\\n python_requires=\">=3.7.0\",\\n- install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"psutil\", \"pyyaml\", \"torch>=1.4.0\"],\\n+ install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"psutil\", \"pyyaml\", \"torch>=1.4.0\", \"rich\"],',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': \"Nice utility! 
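A hedged sketch of the `--debug` behaviour wired in above: run the distributed job and, only when debugging was requested, print a `rich`-formatted traceback. The wrapper function is illustrative; in Accelerate the equivalent logic lives inside the launcher commands.

```python
from rich import get_console

def run_with_debug(run_fn, debug=False):
    try:
        run_fn()
    except Exception:
        if debug:
            console = get_console()
            console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]")
            console.print_exception(show_locals=False)
        raise  # re-raised here so callers still see the failure; the upstream hunk differs
```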
LGTM 🤗, I think Sylvain's suggestion would be a really good way to use this feature. Also, I think this won't result in rich traceback if the issue was in creating an `Accelerate` object itself, right? if so, should that be mentioned as part of the documentation?\",\n", - " 'from_author': False},\n", - " {'body': \"In Multi GPU launcher, if one isn't using `--debug` then they won't see any error stack trace at all (which might be hard to read but important for root cause and resolution). This makes `--debug` sort of mandatory instead of optional. Can the user have normal error traceback if `--debug` isn't given? Please let me know if I am missing something or way off. \\r\\n\\r\\nWent over it again with a sample code, \\r\\n```python\\r\\nfrom rich.traceback import \\r\\ninstall install(show_locals=True)\\r\\n``` \\r\\nmakes rich the default traceback handler. `--debug` meaning is no more what Sylvain was referring to, hence my confusion. \",\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/613'},\n", - " 1020571705: {'diff': 'diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml\\nindex c0c9c62f2..cb0a7ee42 100644\\n--- a/.github/workflows/nightly.yml\\n+++ b/.github/workflows/nightly.yml\\n@@ -57,10 +57,16 @@ jobs:\\n git fetch && git checkout ${{ github.sha }}\\n pip install -e . --no-deps\\n \\n- - name: Run test on GPUs\\n+ - name: Run core and big modeling tests on GPUs\\n run: |\\n source activate accelerate\\n- make test\\n+ make test_big_modeling\\n+ make test_core\\n+\\n+ - name: Run Integration tests on GPUs\\n+ run: |\\n+ source activate accelerate\\n+ make test_integrations\\n \\n - name: Run examples on GPUs\\n run: |\\ndiff --git a/Makefile b/Makefile\\nindex 100a1484a..c1c79d6e8 100644\\n--- a/Makefile\\n+++ b/Makefile\\n@@ -44,6 +44,8 @@ test_examples:\\n \\tpython -m pytest -s -v ./tests/test_examples.py\\n \\n # Broken down example tests for the CI runners\\n+test_integrations:\\n+\\tpython -m pytest -s -v ./tests/deepspeed ./tests/fsdp\\n test_example_differences:\\n \\tpython -m pytest -s -v ./tests/test_examples.py::ExampleDifferenceTests\\n \\ndiff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py\\nindex 30eb2a4d8..1be794421 100644\\n--- a/tests/deepspeed/test_deepspeed.py\\n+++ b/tests/deepspeed/test_deepspeed.py\\n@@ -35,7 +35,6 @@\\n require_cuda,\\n require_deepspeed,\\n require_multi_gpu,\\n- skip,\\n slow,\\n )\\n from accelerate.test_utils.training import RegressionDataset\\n@@ -697,7 +696,6 @@ def test_performance(self):\\n with patch_environment(omp_num_threads=1):\\n execute_subprocess_async(cmd_stage, env=os.environ.copy())\\n \\n- @skip\\n def test_checkpointing(self):\\n self.test_file_path = os.path.join(self.test_scripts_folder, \"test_checkpointing.py\")\\n cmd = [\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'Thank you 😄. This is neat and finally all the tests would run as expected 🤗. 
',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/612'},\n", - " 1020543878: {'diff': 'diff --git a/.github/workflows/check_dependencies.yml b/.github/workflows/build_and_run_tests.yml\\nsimilarity index 82%\\nrename from .github/workflows/check_dependencies.yml\\nrename to .github/workflows/build_and_run_tests.yml\\nindex f8ac1f492..a5378353d 100644\\n--- a/.github/workflows/check_dependencies.yml\\n+++ b/.github/workflows/build_and_run_tests.yml\\n@@ -10,7 +10,7 @@ env:\\n GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\\n \\n jobs:\\n- check-for-setup:\\n+ check-for-source:\\n runs-on: ubuntu-latest\\n name: Check if setup was changed\\n outputs:\\n@@ -28,7 +28,7 @@ jobs:\\n id: was_changed\\n run: |\\n for file in ${{ steps.changed-files.outputs.all_changed_files }}; do\\n- if [ `basename \"${file}\"` = \"setup.py\" ]; then\\n+ if [ `basename \"${file}\"` == \"setup.py\" ]; then\\n echo ::set-output name=changed::\"1\"\\n fi\\n done\\n@@ -36,10 +36,10 @@ jobs:\\n build-docker-containers:\\n needs: check-for-setup\\n if: (github.event_name == \\'push\\') && (needs.check-for-setup.outputs.changed == \\'1\\')\\n- uses: ./.github/workflows/build-docker-images.yml\\n+ uses: ./.github/workflows/build_docker_images.yml\\n secrets: inherit\\n \\n- run-tests:\\n+ run-merge-tests:\\n needs: build-docker-containers\\n if: always()\\n- uses: ./.github/workflows/on-merge.yml\\n\\\\ No newline at end of file\\n+ uses: ./.github/workflows/run_merge_tests.yml\\n\\\\ No newline at end of file\\ndiff --git a/.github/workflows/build-docker-images.yml b/.github/workflows/build_docker_images.yml\\nsimilarity index 100%\\nrename from .github/workflows/build-docker-images.yml\\nrename to .github/workflows/build_docker_images.yml\\ndiff --git a/.github/workflows/on-merge.yml b/.github/workflows/run_merge_tests.yml\\nsimilarity index 100%\\nrename from .github/workflows/on-merge.yml\\nrename to .github/workflows/run_merge_tests.yml\\ndiff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\\nindex d39d36951..5d8ff0c1b 100644\\n--- a/.github/workflows/test.yml\\n+++ b/.github/workflows/test.yml\\n@@ -1,6 +1,13 @@\\n name: Run Tests\\n \\n-on: [pull_request]\\n+on:\\n+ pull_request:\\n+ paths:\\n+ - \"src/**\"\\n+ - \"tests/**\"\\n+ - \".github/**\"\\n+ - \"setup.py\"\\n+ types: [opened, synchronize, reopened]\\n \\n env:\\n HF_HOME: ~/hf_cache\\n',\n", - " 'code_comments': [{'body': 'I feel this name should be `Checks if setup.py were changed`, as I can only see it checks that file.\\r\\n\\r\\nAnd on merge event to `main`, it seems the tests are always run.',\n", - " 'diff_hunk': '@@ -10,9 +10,9 @@ env:\\n GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\\n \\n jobs:\\n- check-for-setup:\\n+ check-for-source:\\n runs-on: ubuntu-latest\\n- name: Check if setup was changed\\n+ name: Checks if source code or tests were changed',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'So the purpose of `check-for-source` in `build_and_run_tests.yml` and `test.yml` are different?\\r\\n\\r\\n- on PR request: check source file, and run the tests only if any changed found.\\r\\n- on merge (to main): check `setup.py` to build the image if necessary. Then always run the tests?',\n", - " 'from_author': False},\n", - " {'body': 'I am probably wrong above, as I see you have `needs: build-docker-containers`. 
\\r\\nBut I am confused a bit: currently (without this PR), what happens (on merge) if `setup.py` is not changed, but some `.py` files change?\\r\\n\\r\\nI will review in more detail tomorrow.\\r\\n',\n", - " 'from_author': False},\n", - " {'body': \"@ydshieh if some `.py` was changed but not the setup then the tests are ran but Docker images won't be rebuilt\",\n", - " 'from_author': True},\n", - " {'body': 'Thanks, @muellerzr . So in this PR, what happens (on merge) if no `.py` file changes at all (`setup.py` not change + no other `.py` change)? If I understand, the tests still run. Is this the expected behavior?',\n", - " 'from_author': False},\n", - " {'body': 'My only remaining question is as above: on merge, should we skip the tests if no `.py` file changes at all. Otherwise, all LGTM.',\n", - " 'from_author': False},\n", - " {'body': \"@ydshieh (sorry this got buried in my priorities):\\r\\n\\r\\nRe: on merge, yes it is because we never know if some other dependency has a regression issue or what. So we're still made aware of when this happens post merge and can immediately know it's unrelated to something directly in that PR without it silently not failing until the nightlies\",\n", - " 'from_author': True}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/611'},\n", - " 1020495465: {'diff': 'diff --git a/docs/source/index.mdx b/docs/source/index.mdx\\nindex 3f0f58802..1664fd7a2 100644\\n--- a/docs/source/index.mdx\\n+++ b/docs/source/index.mdx\\n@@ -55,7 +55,7 @@ accelerate launch {my_script.py}\\n >
Tutorials  [HTML doc-card markup stripped in this index.mdx hunk; only the card text survives]
 Learn the basics and become familiar with using 🤗 Accelerate. Start here if you are using 🤗 Accelerate for the first time!
- How-to guides
 Practical guides to help you achieve a specific goal. Take a look at these guides to learn how to use 🤗 Accelerate to solve real-world problems.
\n@@ -68,4 +68,4 @@ accelerate launch {my_script.py}\n
 Technical descriptions of how 🤗 Accelerate classes and methods work.
\\n \\n
\\n-
\\n\\\\ No newline at end of file\\n+\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/610'},\n", - " 1018728968: {'diff': 'diff --git a/src/accelerate/utils/operations.py b/src/accelerate/utils/operations.py\\nindex 42868a0a5..3ba46a03f 100644\\n--- a/src/accelerate/utils/operations.py\\n+++ b/src/accelerate/utils/operations.py\\n@@ -103,7 +103,7 @@ def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_oth\\n return data\\n \\n \\n-def send_to_device(tensor, device):\\n+def send_to_device(tensor, device, non_blocking=False):\\n \"\"\"\\n Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.\\n \\n@@ -117,13 +117,16 @@ def send_to_device(tensor, device):\\n The same data structure as `tensor` with all tensors sent to the proper device.\\n \"\"\"\\n \\n- def _send_to_device(t, device):\\n- return t.to(device)\\n+ def _send_to_device(t, device, non_blocking):\\n+ try:\\n+ return t.to(device, non_blocking=non_blocking)\\n+ except TypeError: # .to() doesn\\'t accept non_blocking as kwarg\\n+ return t.to(device)\\n \\n def _has_to_method(t):\\n return hasattr(t, \"to\")\\n \\n- return recursively_apply(_send_to_device, tensor, device, test_type=_has_to_method)\\n+ return recursively_apply(_send_to_device, tensor, device, non_blocking, test_type=_has_to_method)\\n \\n \\n def get_data_structure(data):\\n',\n", - " 'code_comments': [{'body': 'So far all objects in Pytorch which accept `.to()` method accept the `non_blocking` kwarg or at least kwargs in general, but are we sure this is going to be always the case?\\r\\n\\r\\n- [torch.jit.ScriptModule.to](https://pytorch.org/docs/stable/generated/torch.jit.ScriptModule.html?highlight=#torch.jit.ScriptModule.to) (Python method, in ScriptModule)\\r\\n- [torch.nn.Module.to](https://pytorch.org/docs/stable/generated/torch.nn.Module.html?highlight=#torch.nn.Module.to) (Python method, in Module)\\r\\n- [torch.nn.utils.rnn.PackedSequence.to](https://pytorch.org/docs/stable/generated/torch.nn.utils.rnn.PackedSequence.html?highlight=#torch.nn.utils.rnn.PackedSequence.to) (Python method, in PackedSequence)\\r\\n- [torch.Tensor.to](https://pytorch.org/docs/stable/generated/torch.Tensor.to.html?highlight=#torch.Tensor.to) (Python method, in torch.Tensor.to)\\r\\n\\r\\ncc @sgugger ',\n", - " 'diff_hunk': '@@ -117,13 +117,13 @@ def send_to_device(tensor, device):\\n The same data structure as `tensor` with all tensors sent to the proper device.\\n \"\"\"\\n \\n- def _send_to_device(t, device):\\n- return t.to(device)\\n+ def _send_to_device(t, device, non_blocking):\\n+ return t.to(device, non_blocking=non_blocking)\\n \\n def _has_to_method(t):\\n return hasattr(t, \"to\")',\n", - " 'from_author': True},\n", - " {'body': 'This is for our own objects FYI, `BatchEncoding` in Transformers, which does not accept the `non_blocking` argument :-/\\r\\nMaybe we can inspect the signature and only pass it when we see it?',\n", - " 'diff_hunk': '@@ -117,13 +117,13 @@ def send_to_device(tensor, device):\\n The same data structure as `tensor` with all tensors sent to the proper device.\\n \"\"\"\\n \\n- def _send_to_device(t, device):\\n- return t.to(device)\\n+ def _send_to_device(t, device, non_blocking):\\n+ return t.to(device, non_blocking=non_blocking)\\n \\n def _has_to_method(t):\\n return hasattr(t, 
\"to\")',\n", - " 'from_author': False},\n", - " {'body': 'I tried using `t.to.__code__` and `inspect.signature(t.to)` and both give errors. Do you have a better way to get kwargs of a `builtin_function_or_method` @sgugger ?',\n", - " 'diff_hunk': '@@ -117,13 +117,13 @@ def send_to_device(tensor, device):\\n The same data structure as `tensor` with all tensors sent to the proper device.\\n \"\"\"\\n \\n- def _send_to_device(t, device):\\n- return t.to(device)\\n+ def _send_to_device(t, device, non_blocking):\\n+ return t.to(device, non_blocking=non_blocking)\\n \\n def _has_to_method(t):\\n return hasattr(t, \"to\")',\n", - " 'from_author': True},\n", - " {'body': 'Mmm, `inspect.signature(t.to)` should give you access to the keyword arguments. You can find them in the `.parameters` attribute, which will be a dict-like object.',\n", - " 'diff_hunk': '@@ -117,13 +117,13 @@ def send_to_device(tensor, device):\\n The same data structure as `tensor` with all tensors sent to the proper device.\\n \"\"\"\\n \\n- def _send_to_device(t, device):\\n- return t.to(device)\\n+ def _send_to_device(t, device, non_blocking):\\n+ return t.to(device, non_blocking=non_blocking)\\n \\n def _has_to_method(t):\\n return hasattr(t, \"to\")',\n", - " 'from_author': False},\n", - " {'body': 'I get this error when trying `inspect.signature(t.to)` where `t` is a tensor\\r\\n```\\r\\nTraceback (most recent call last):\\r\\n File \"\", line 1, in \\r\\n File \"/home/nouamane/miniconda3/envs/hf/lib/python3.9/inspect.py\", line 3111, in signature\\r\\n return Signature.from_callable(obj, follow_wrapped=follow_wrapped)\\r\\n File \"/home/nouamane/miniconda3/envs/hf/lib/python3.9/inspect.py\", line 2860, in from_callable\\r\\n return _signature_from_callable(obj, sigcls=cls,\\r\\n File \"/home/nouamane/miniconda3/envs/hf/lib/python3.9/inspect.py\", line 2327, in _signature_from_callable\\r\\n return _signature_from_builtin(sigcls, obj,\\r\\n File \"/home/nouamane/miniconda3/envs/hf/lib/python3.9/inspect.py\", line 2145, in _signature_from_builtin\\r\\n raise ValueError(\"no signature found for builtin {!r}\".format(func))\\r\\nValueError: no signature found for builtin \\r\\n```',\n", - " 'diff_hunk': '@@ -117,13 +117,13 @@ def send_to_device(tensor, device):\\n The same data structure as `tensor` with all tensors sent to the proper device.\\n \"\"\"\\n \\n- def _send_to_device(t, device):\\n- return t.to(device)\\n+ def _send_to_device(t, device, non_blocking):\\n+ return t.to(device, non_blocking=non_blocking)\\n \\n def _has_to_method(t):\\n return hasattr(t, \"to\")',\n", - " 'from_author': True},\n", - " {'body': \"I think this is related to the warning in [inspect's docs](https://docs.python.org/3/library/inspect.html#inspect.signature)\\r\\n\\r\\n> Note: Some callables may not be introspectable in certain implementations of Python. For example, in CPython, some built-in functions defined in C provide no metadata about their arguments.\",\n", - " 'diff_hunk': '@@ -117,13 +117,13 @@ def send_to_device(tensor, device):\\n The same data structure as `tensor` with all tensors sent to the proper device.\\n \"\"\"\\n \\n- def _send_to_device(t, device):\\n- return t.to(device)\\n+ def _send_to_device(t, device, non_blocking):\\n+ return t.to(device, non_blocking=non_blocking)\\n \\n def _has_to_method(t):\\n return hasattr(t, \"to\")',\n", - " 'from_author': True},\n", - " {'body': 'Maybe we can isolate tensors in the function then? 
Something like:\\r\\n```\\r\\nif isinstance(t, torch.Tensor):\\r\\n return t.to(device, non_blocking=non_blocking)\\r\\nelse:\\r\\n # inspect signature here in a try/except block\\r\\n```',\n", - " 'diff_hunk': '@@ -117,13 +117,13 @@ def send_to_device(tensor, device):\\n The same data structure as `tensor` with all tensors sent to the proper device.\\n \"\"\"\\n \\n- def _send_to_device(t, device):\\n- return t.to(device)\\n+ def _send_to_device(t, device, non_blocking):\\n+ return t.to(device, non_blocking=non_blocking)\\n \\n def _has_to_method(t):\\n return hasattr(t, \"to\")',\n", - " 'from_author': False},\n", - " {'body': \"Wouldn't this be enough, or should I inspect signature?\\r\\n ```python\\r\\n def _send_to_device(t, device, non_blocking):\\r\\n if isinstance(t, torch.Tensor):\\r\\n return t.to(device, non_blocking=non_blocking)\\r\\n else:\\r\\n try:\\r\\n return t.to(device, non_blocking=non_blocking)\\r\\n except TypeError:\\r\\n return t.to(device)\\r\\n```\",\n", - " 'diff_hunk': '@@ -117,13 +117,13 @@ def send_to_device(tensor, device):\\n The same data structure as `tensor` with all tensors sent to the proper device.\\n \"\"\"\\n \\n- def _send_to_device(t, device):\\n- return t.to(device)\\n+ def _send_to_device(t, device, non_blocking):\\n+ return t.to(device, non_blocking=non_blocking)\\n \\n def _has_to_method(t):\\n return hasattr(t, \"to\")',\n", - " 'from_author': True},\n", - " {'body': \"Oh in this case, you don't even need the first test. Are you sure about the type of `TypeError`?\",\n", - " 'diff_hunk': '@@ -117,13 +117,13 @@ def send_to_device(tensor, device):\\n The same data structure as `tensor` with all tensors sent to the proper device.\\n \"\"\"\\n \\n- def _send_to_device(t, device):\\n- return t.to(device)\\n+ def _send_to_device(t, device, non_blocking):\\n+ return t.to(device, non_blocking=non_blocking)\\n \\n def _has_to_method(t):\\n return hasattr(t, \"to\")',\n", - " 'from_author': False},\n", - " {'body': \"Searching on google for `python unexpected argument`, it's usually `TypeError: ... got an unexpected keyword`. 
I'm not sure if it's always the case though\",\n", - " 'diff_hunk': '@@ -117,13 +117,13 @@ def send_to_device(tensor, device):\\n The same data structure as `tensor` with all tensors sent to the proper device.\\n \"\"\"\\n \\n- def _send_to_device(t, device):\\n- return t.to(device)\\n+ def _send_to_device(t, device, non_blocking):\\n+ return t.to(device, non_blocking=non_blocking)\\n \\n def _has_to_method(t):\\n return hasattr(t, \"to\")',\n", - " 'from_author': True},\n", - " {'body': 'Seems to work for C defined methods as well:\\r\\n```python\\r\\n>> t.__abs__(a=5)\\r\\nTraceback (most recent call last):\\r\\n File \"\", line 1, in \\r\\nTypeError: Tensor.abs() takes no keyword arguments\\r\\n```\\r\\n\\r\\n\\r\\n```python\\r\\n>> t.to(a=5)\\r\\nTraceback (most recent call last):\\r\\n File \"\", line 1, in \\r\\nTypeError: to() received an invalid combination of arguments - got (a=int, ), but expected one of:\\r\\n * (torch.device device, torch.dtype dtype, bool non_blocking, bool copy, *, torch.memory_format memory_format)\\r\\n * (torch.dtype dtype, bool non_blocking, bool copy, *, torch.memory_format memory_format)\\r\\n * (Tensor tensor, bool non_blocking, bool copy, *, torch.memory_format memory_format)\\r\\n```',\n", - " 'diff_hunk': '@@ -117,13 +117,13 @@ def send_to_device(tensor, device):\\n The same data structure as `tensor` with all tensors sent to the proper device.\\n \"\"\"\\n \\n- def _send_to_device(t, device):\\n- return t.to(device)\\n+ def _send_to_device(t, device, non_blocking):\\n+ return t.to(device, non_blocking=non_blocking)\\n \\n def _has_to_method(t):\\n return hasattr(t, \"to\")',\n", - " 'from_author': True},\n", - " {'body': 'Nice!',\n", - " 'diff_hunk': '@@ -117,13 +117,13 @@ def send_to_device(tensor, device):\\n The same data structure as `tensor` with all tensors sent to the proper device.\\n \"\"\"\\n \\n- def _send_to_device(t, device):\\n- return t.to(device)\\n+ def _send_to_device(t, device, non_blocking):\\n+ return t.to(device, non_blocking=non_blocking)\\n \\n def _has_to_method(t):\\n return hasattr(t, \"to\")',\n", - " 'from_author': False},\n", - " {'body': \"@sgugger the docs say that `tensor` is a torch.Tensor here, which isn't necessary the case iiuc. 
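The review thread above converges on a `try/except TypeError` fallback, so objects whose `.to()` does not accept `non_blocking` (such as `BatchEncoding`) still get moved. A hedged sketch of that single-object helper, leaving out the recursive wrapper that Accelerate applies to nested containers:

```python
import torch

def _send_to_device_single(t, device, non_blocking=False):
    if isinstance(t, torch.Tensor):
        return t.to(device, non_blocking=non_blocking)
    try:
        return t.to(device, non_blocking=non_blocking)
    except TypeError:  # this object's .to() doesn't take non_blocking as a kwarg
        return t.to(device)
```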
We can probably update the docs as well\",\n", - " 'diff_hunk': '@@ -103,7 +103,7 @@ def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_oth\\n return data\\n \\n \\n-def send_to_device(tensor, device):\\n+def send_to_device(tensor, device, non_blocking=False):\\n \"\"\"\\n Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.',\n", - " 'from_author': True},\n", - " {'body': 'True',\n", - " 'diff_hunk': '@@ -103,7 +103,7 @@ def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_oth\\n return data\\n \\n \\n-def send_to_device(tensor, device):\\n+def send_to_device(tensor, device, non_blocking=False):\\n \"\"\"\\n Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.',\n", - " 'from_author': False},\n", - " {'body': 'We could leave that for another PR ',\n", - " 'diff_hunk': '@@ -103,7 +103,7 @@ def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_oth\\n return data\\n \\n \\n-def send_to_device(tensor, device):\\n+def send_to_device(tensor, device, non_blocking=False):\\n \"\"\"\\n Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'This issue has been automatically marked as stale because it has not had recent activity. If you think this still needs to be addressed please comment on this thread.\\n\\nPlease note that issues that do not follow the [contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) are likely to be ignored.',\n", - " 'from_author': False},\n", - " {'body': 'Hi @sgugger, was there a reason that this was closed and not merged? I came across this when I was about to request the same thing. ',\n", - " 'from_author': False},\n", - " {'body': '@Chris-hughes10 it was automatically closed by the stale bot due to the absence of activity. @NouamaneTazi is still investigating whether this could have some negative impact sometimes.',\n", - " 'from_author': False},\n", - " {'body': '### Should we make .to(non_blocking=True) the default in **accelerate**?\\r\\nLet’s use this script to find out\\r\\n\\r\\n```python\\r\\nimport torch\\r\\nif __name__ == \\'__main__\\':\\r\\n seed = 0\\r\\n torch.manual_seed(seed)\\r\\n torch.cuda.manual_seed(seed)\\r\\n torch.cuda.manual_seed_all(seed)\\r\\n stream = torch.cuda.current_stream()\\r\\n\\r\\n x = torch.rand(32, 256, 220, 220).cuda()\\r\\n\\r\\n t = (x.min() - x.max()).to(torch.device(\"cpu\"), non_blocking=False) # try with False then True\\r\\n # t = (x.min() - x.max()).to(\"cuda:1\", non_blocking=True) # try GPU0 to GPU1 copy\\r\\n\\r\\n print(stream.query()) # False - Checks if all the work submitted has been completed.\\r\\n print(t)\\r\\n stream.synchronize() # wait for stream to finish the work\\r\\n \\r\\n print(stream.query()) # True - work done\\r\\n print(t)\\r\\n```\\r\\n\\r\\n
\\r\\nCopy to CPU with non_blocking=False (default)\\r\\n\\r\\nIn this case `.to()` adds a `cudaStreamSynchronize` op which makes the CPU use the correct value of the tensor when printing\\r\\n![image](https://user-images.githubusercontent.com/29777165/194038267-91033b35-fc18-4a0d-9af1-cfb1e382721e.png)\\r\\n\\r\\n
\\r\\n\\r\\n
\\r\\nCopy to CPU with non_blocking=True \\r\\n\\r\\n\\r\\nIn this case the CPU submits the kernels for `.to()` to the GPU then moves on to perform the print operation which uses an incorrect value for the tensor `tensor(0.)` **(The dangerous part)** \\r\\n![image](https://user-images.githubusercontent.com/29777165/194038545-88833ffc-5141-4fd4-a503-c140202f0bed.png)\\r\\n\\r\\n
\\r\\n\\r\\n
\\r\\nCopy to another GPU with non_blocking=True \\r\\n\\r\\nIt seems that the `non_blocking` here doesn’t do much (we get basically the same thing using `non_blocking=True` ). In both cases we have GPU 1 waiting for GPU 0 to finish working on the tensor, and THEN copy it to GPU 1. And finally the CPU prints the tensor that’s now located on GPU 1\\r\\nIn this case `.to()` creates a `cudaStreamWaitEvent` event (figure 2) which makes GPU 1 waits for GPU 0. I made [an issue](https://discuss.pytorch.org/t/to-cuda-1-non-blocking-true-creates-cudastreamwaitevent/162296) on Pytorch’s forums to investigate why is this the case\\r\\n\\r\\n![image](https://user-images.githubusercontent.com/29777165/194038566-56a7c178-d659-4c3f-a759-d94da8c0f152.png)\\r\\n\\r\\n![image](https://user-images.githubusercontent.com/29777165/194038587-5a089b5c-af42-4bab-86b1-516e2ebe3300.png)\\r\\n\\r\\n
\\r\\n\\r\\n**tldr;** `non_blocking` could be a game changer in using your GPUs efficiently. \\r\\n- **Good use scenario:** Copying your data from CPU to GPU in a non_blocking way then running your model which exists on GPU (this would make the CPU launch the copy kernel, then moves on to queuing other kernels in your model on the GPU. As opposed to waiting for the copy to end, and only then launching kernels from the model). [Example from Pytorch\\'s repo](https://github.com/pytorch/examples/blob/main/imagenet/main.py#L321-L326).\\r\\n- **Bad use scenario:** Copying your data from CPU to GPU in a non_blocking way then start some operations **on CPU** that would use the non-ready tensors. (could be in `if` statements, or simple arithmetics...)\\r\\n\\r\\n\\r\\n=> It’s good to support that argument in accelerate but it’s better to keep the default as it is, just like it’s the case in Pytorch ',\n", - " 'from_author': True},\n", - " {'body': \"@NouamaneTazi These are really interesting insights! If you are looking at inspecting signatures, do you think it would be too much complexity to set non-blocking automatically based on whether it is CPU -> GPU transfer or GPU -> CPU transfer by inspecting the tensor's current device?\\r\\n\\r\\nIf you like the idea, perhaps this would be a different feature though, toggled by an accelerator flag?\",\n", - " 'from_author': False},\n", - " {'body': '@Chris-hughes10 CPU -> GPU and GPU -> CPU both lead to the same issues as mentioned above. Only GPU -> GPU is the safe operation but as I said above, it seems that it requires the two GPUs synchronization whether we set `non_blocking=True` or not',\n", - " 'from_author': True},\n", - " {'body': '> @Chris-hughes10 CPU -> GPU and GPU -> CPU both lead to the same issues as mentioned above. Only GPU -> GPU is the safe operation but as I said above, it seems that it requires the two GPUs synchronization whether we set `non_blocking=True` or not\\r\\n\\r\\nApologies, I misread the post above. 
Please ignore my previous suggestion!',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/607'},\n", - " 1017453798: {'diff': 'diff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\\nindex 188800527..467a757b4 100644\\n--- a/src/accelerate/utils/modeling.py\\n+++ b/src/accelerate/utils/modeling.py\\n@@ -677,7 +677,7 @@ def load_checkpoint_in_model(\\n else:\\n for param_name, param in checkpoint.items():\\n module_name = param_name\\n- if dtype is not None:\\n+ if dtype is not None and not str(param.dtype).startswith((\"torch.uint\", \"torch.int\", \"torch.bool\")):\\n param = param.to(dtype)\\n while len(module_name) > 0 and module_name not in device_map:\\n module_name = \".\".join(module_name.split(\".\")[:-1])\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/606'},\n", - " 1017337994: {'diff': 'diff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\\nindex 673975fc7..839a7a2a9 100644\\n--- a/src/accelerate/tracking.py\\n+++ b/src/accelerate/tracking.py\\n@@ -209,7 +209,7 @@ def __init__(self, run_name: str, **kwargs):\\n \\n @property\\n def tracker(self):\\n- return self.run.run\\n+ return self.run\\n \\n def store_init_configuration(self, values: dict):\\n \"\"\"\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': 'Thanks!', 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/605'},\n", - " 1017176784: {'diff': 'diff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py\\nindex a8c1aedac..30eb2a4d8 100644\\n--- a/tests/deepspeed/test_deepspeed.py\\n+++ b/tests/deepspeed/test_deepspeed.py\\n@@ -35,6 +35,7 @@\\n require_cuda,\\n require_deepspeed,\\n require_multi_gpu,\\n+ skip,\\n slow,\\n )\\n from accelerate.test_utils.training import RegressionDataset\\n@@ -642,7 +643,9 @@ def setUp(self):\\n \"deepspeed_stage_1_fp16\": 1600,\\n \"deepspeed_stage_2_fp16\": 2500,\\n \"deepspeed_stage_3_zero_init_fp16\": 2800,\\n- \"deepspeed_stage_3_cpu_offload_fp16\": 1900,\\n+ # Disabling below test as it overwhelms the RAM memory usage\\n+ # on CI self-hosted runner leading to tests getting killed.\\n+ # \"deepspeed_stage_3_cpu_offload_fp16\": 1900,\\n }\\n self.n_train = 160\\n self.n_val = 160\\n@@ -694,6 +697,7 @@ def test_performance(self):\\n with patch_environment(omp_num_threads=1):\\n execute_subprocess_async(cmd_stage, env=os.environ.copy())\\n \\n+ @skip\\n def test_checkpointing(self):\\n self.test_file_path = os.path.join(self.test_scripts_folder, \"test_checkpointing.py\")\\n cmd = [\\ndiff --git a/tests/fsdp/test_fsdp.py b/tests/fsdp/test_fsdp.py\\nindex 628e377c3..249d2b692 100644\\n--- a/tests/fsdp/test_fsdp.py\\n+++ b/tests/fsdp/test_fsdp.py\\n@@ -191,7 +191,9 @@ def setUp(self):\\n \"multi_gpu_fp16\": 3200,\\n \"fsdp_shard_grad_op_transformer_based_wrap_fp16\": 2000,\\n \"fsdp_full_shard_transformer_based_wrap_fp16\": 1900,\\n- \"fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32\": 1500, # fp16 was leading to indefinite hang\\n+ # Disabling below test as it overwhelms the RAM memory usage\\n+ # on CI self-hosted runner leading to tests getting killed.\\n+ # 
\"fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32\": 1500, # fp16 was leading to indefinite hang\\n }\\n self.n_train = 160\\n self.n_val = 160\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/604'},\n", - " 1016367735: {'diff': 'diff --git a/docs/source/concept_guides/training_tpu.mdx b/docs/source/concept_guides/training_tpu.mdx\\nindex 32736ce31..7fe54b14a 100644\\n--- a/docs/source/concept_guides/training_tpu.mdx\\n+++ b/docs/source/concept_guides/training_tpu.mdx\\n@@ -89,7 +89,7 @@ like:\\n ProcessExitedException: process 0 terminated with signal SIGSEGV\\n ```\\n \\n-This error is *extremely* cryptic but the basic explaination is you ran out of system RAM. You can avoid this entirely by reconfiguring the training function to \\n+This error is *extremely* cryptic but the basic explanation is you ran out of system RAM. You can avoid this entirely by reconfiguring the training function to \\n accept a single `model` argument, and declare it in an outside cell:\\n \\n ```python\\n@@ -137,7 +137,7 @@ accelerator = Accelerator(mixed_precision=\"bf16\")\\n By default this will cast `torch.float` and `torch.double` to `bfloat16` on TPUs. \\n The specific configuration being set is an environmental variable of `XLA_USE_BF16` is set to `1`.\\n \\n-There is a futher configuration you can perform which is setting the `XLA_DOWNCAST_BF16` environmental variable. If set to `1`, then \\n+There is a further configuration you can perform which is setting the `XLA_DOWNCAST_BF16` environmental variable. If set to `1`, then \\n `torch.float` is `bfloat16` and `torch.double` is `float32`.\\n \\n This is performed in the `Accelerator` object when passing `downcast_bf16=True`:\\ndiff --git a/docs/source/quicktour.mdx b/docs/source/quicktour.mdx\\nindex 812b20226..6d92e7958 100644\\n--- a/docs/source/quicktour.mdx\\n+++ b/docs/source/quicktour.mdx\\n@@ -391,7 +391,7 @@ and [`~Accelerator.clip_grad_value_`] respectively.\\n \\n If you are running your training in Mixed Precision with 🤗 Accelerate, you will get the best result with your loss being\\n computed inside your model (like in Transformer models for instance). Every computation outside of the model will be\\n-executed in full precision (which is generally what you want for loss computation, expecially if it involves a\\n+executed in full precision (which is generally what you want for loss computation, especially if it involves a\\n softmax). However you might want to put your loss computation inside the *accelerator.autocast* context manager:\\n \\n ```\\ndiff --git a/docs/source/usage_guides/big_modeling.mdx b/docs/source/usage_guides/big_modeling.mdx\\nindex 243b354d0..9b3252809 100644\\n--- a/docs/source/usage_guides/big_modeling.mdx\\n+++ b/docs/source/usage_guides/big_modeling.mdx\\n@@ -213,7 +213,7 @@ You can let 🤗 Accelerate handle the device map computation by setting `device\\n \\n
\\n \\n-All the options will produce the same result when you don\\'t have enough GPU memory to accomodate the whole model (which is to fit everything that can on the GPU, then offload weights on the CPU or even on the disk if there is not enough RAM). \\n+All the options will produce the same result when you don\\'t have enough GPU memory to accommodate the whole model (which is to fit everything that can on the GPU, then offload weights on the CPU or even on the disk if there is not enough RAM). \\n \\n When you have more GPU memory available than the model size, here the difference between each option:\\n - `\"auto\"` and `\"balanced\"` evenly split the model on all available GPUs, making it possible for you to use a batch size greater than 1.\\ndiff --git a/docs/source/usage_guides/checkpoint.mdx b/docs/source/usage_guides/checkpoint.mdx\\nindex c818c9847..7d6bbbf99 100644\\n--- a/docs/source/usage_guides/checkpoint.mdx\\n+++ b/docs/source/usage_guides/checkpoint.mdx\\n@@ -13,7 +13,7 @@ specific language governing permissions and limitations under the License.\\n # Checkpointing\\n \\n When training a PyTorch model with 🤗 Accelerate, you may often want to save and continue a state of training. Doing so requires\\n-saving and loading the model, optimizer, RNG generators, and the GradScaler. Inside 🤗 Accelerate are two convience functions to achieve this quickly:\\n+saving and loading the model, optimizer, RNG generators, and the GradScaler. Inside 🤗 Accelerate are two convenience functions to achieve this quickly:\\n - Use [`~Accelerator.save_state`] for saving everything mentioned above to a folder location\\n - Use [`~Accelerator.load_state`] for loading everything stored from an earlier `save_state`\\n \\ndiff --git a/docs/source/usage_guides/deepspeed.mdx b/docs/source/usage_guides/deepspeed.mdx\\nindex 8a4238732..29561c77b 100644\\n--- a/docs/source/usage_guides/deepspeed.mdx\\n+++ b/docs/source/usage_guides/deepspeed.mdx\\n@@ -68,7 +68,7 @@ Inference:\\n \\n ## How it works?\\n \\n-**Pre-Requisites**: Install DeepSpeed version >=0.6.5. Please refer to the [DeepSpeed Insallation details](https://github.com/microsoft/DeepSpeed#installation)\\n+**Pre-Requisites**: Install DeepSpeed version >=0.6.5. Please refer to the [DeepSpeed Installation details](https://github.com/microsoft/DeepSpeed#installation)\\n for more information.\\n \\n We will first look at easy to use integration via `accelerate config`. \\n@@ -383,13 +383,13 @@ We will look at the changes needed in the code when using these.\\n ```\\n b. Custom Optim + Custom Scheduler: The case when both `optimizer` and `scheduler` keys are absent in the DeepSpeed config file.\\n In this situation, no code changes are needed from the user and this is the case when using integration via DeepSpeed Plugin.\\n- In the above example we can see that the code reamins unchanged if the `optimizer` and `scheduler` keys are absent in the DeepSpeed config file.\\n+ In the above example we can see that the code remains unchanged if the `optimizer` and `scheduler` keys are absent in the DeepSpeed config file.\\n \\n c. Custom Optim + DS Scheduler: The case when only `scheduler` key is present in the DeepSpeed config file. \\n In this situation, user has to use `accelerate.utils.DummyScheduler` to replace the PyTorch/Custom scheduler in their code. \\n \\n d. DS Optim + Custom Scheduler: The case when only `optimizer` key is present in the DeepSpeed config file. 
\\n- This will result in an error because one can only use DS Scheduler when using DS Optim.\\n+ This will result in an error because you can only use DS Scheduler when using DS Optim.\\n \\n 2. Notice the `auto` values in the above example DeepSpeed config files. These are automatically handled by `prepare` method \\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. \\n@@ -435,7 +435,7 @@ ZeRO Stage-3 has 2 options:\\n logging.warning(f\"Failure {status_msg}\")\\n ``` \\n This will create ZeRO model and optimizer partitions along with `zero_to_fp32.py` script in checkpoint directory.\\n- One can use this script to do offline consolidation. \\n+ You can use this script to do offline consolidation. \\n It requires no configuration files or GPUs. Here is an example of its usage: \\n ```bash\\n $ cd /path/to/checkpoint_dir\\n@@ -444,14 +444,14 @@ ZeRO Stage-3 has 2 options:\\n Detected checkpoint of type zero stage 3, world_size: 2\\n Saving fp32 state dict to pytorch_model.bin (total_numel=60506624)\\n ```\\n- To get 32bit model for saving/inference, one can do the following:\\n+ To get 32bit model for saving/inference, you can perform:\\n ```python\\n from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint\\n \\n unwrapped_model = accelerator.unwrap_model(model)\\n fp32_model = load_state_dict_from_zero_checkpoint(unwrapped_model, checkpoint_dir)\\n ```\\n- If only interested in state_dict, one can do the following:\\n+ If you are only interested in the `state_dict`, you can do the following:\\n ```python\\n from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint\\n \\n@@ -462,7 +462,7 @@ ZeRO Stage-3 has 2 options:\\n ## ZeRO Inference\\n DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity. \\n It uses the same ZeRO protocol as training, but it doesn\\'t use an optimizer and a lr scheduler and only stage 3 is relevant.\\n-With accelerate integration, one has to just prepare model and dataloader as shown below:\\n+With accelerate integration, you just need to prepare the model and dataloader as shown below:\\n \\n ```python\\n model, eval_dataloader = accelerator.prepare(model, eval_dataloader)\\ndiff --git a/docs/source/usage_guides/tracking.mdx b/docs/source/usage_guides/tracking.mdx\\nindex 7c61b21e9..cc5c17418 100644\\n--- a/docs/source/usage_guides/tracking.mdx\\n+++ b/docs/source/usage_guides/tracking.mdx\\n@@ -104,7 +104,7 @@ Every tracker must implement three functions and have three properties:\\n - This should be implemented as a `@property` function \\n - Should return the internal tracking mechanism the library uses, such as the `run` object for `wandb`.\\n \\n-A brief example can be seen below with an integration with Weights and Biases, containing only the relevent information:\\n+A brief example can be seen below with an integration with Weights and Biases, containing only the relevant information:\\n ```python\\n from accelerate.tracking import GeneralTracker\\n from typing import Optional\\n',\n", - " 'code_comments': [{'body': '```suggestion\\r\\nTo introduce special behavior in your script for TPUs you can check the `distributed_type` of your\\r\\n```',\n", - " 'diff_hunk': '@@ -261,7 +261,7 @@ lot of time. 
In practice, that means you must take special care to have all your\\n shape (so no dynamic padding for instance if you are in an NLP problem) and should not use layers with for loops that\\n have different lengths depending on the inputs (such as an LSTM) or the training will be excruciatingly slow.\\n \\n-To introduce special behavior in your script for TPUs you can check the `distributed_type` of your\\n+To introduce special behaviour in your script for TPUs you can check the `distributed_type` of your',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\nInternally, the library works by first analyzing the environment in which the script is launched to determine which\\r\\n```',\n", - " 'diff_hunk': '@@ -447,7 +447,7 @@ will be added in a next version.\\n \\n ## Internal mechanism\\n \\n-Internally, the library works by first analyzing the environment in which the script is launched to determine which\\n+Internally, the library works by first analysing the environment in which the script is launched to determine which',\n", - " 'from_author': False}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/603'},\n", - " 1015934205: {'diff': 'diff --git a/src/accelerate/test_utils/scripts/test_checkpointing.py b/src/accelerate/test_utils/scripts/external_deps/test_checkpointing.py\\nsimilarity index 100%\\nrename from src/accelerate/test_utils/scripts/test_checkpointing.py\\nrename to src/accelerate/test_utils/scripts/external_deps/test_checkpointing.py\\ndiff --git a/src/accelerate/test_utils/scripts/test_peak_memory_usage.py b/src/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py\\nsimilarity index 100%\\nrename from src/accelerate/test_utils/scripts/test_peak_memory_usage.py\\nrename to src/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py\\ndiff --git a/src/accelerate/test_utils/scripts/test_performance.py b/src/accelerate/test_utils/scripts/external_deps/test_performance.py\\nsimilarity index 100%\\nrename from src/accelerate/test_utils/scripts/test_performance.py\\nrename to src/accelerate/test_utils/scripts/external_deps/test_performance.py\\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\\nindex 350d95320..c394b50cc 100644\\n--- a/src/accelerate/utils/dataclasses.py\\n+++ b/src/accelerate/utils/dataclasses.py\\n@@ -383,7 +383,7 @@ def deepspeed_config_process(self, prefix=\"\", mismatches=None, config=None, must\\n def set_mixed_precision(self, mixed_precision):\\n ds_config = self.deepspeed_config\\n if mixed_precision == \"fp16\" and \"fp16\" not in ds_config and \"bf16\" not in ds_config:\\n- ds_config.update({\"fp16\": {\"enabled\": True}})\\n+ ds_config.update({\"fp16\": {\"enabled\": True, \"auto_cast\": True}})\\n elif mixed_precision == \"bf16\" and \"fp16\" not in ds_config and \"bf16\" not in ds_config:\\n ds_config.update({\"bf16\": {\"enabled\": True}})\\n \\ndiff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py\\nindex 6b37eb93e..a8c1aedac 100644\\n--- a/tests/deepspeed/test_deepspeed.py\\n+++ b/tests/deepspeed/test_deepspeed.py\\n@@ -594,7 +594,9 @@ def test_autofill_dsconfig(self):\\n \\n def test_basic_run(self):\\n mod_file = inspect.getfile(accelerate.test_utils)\\n- test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"scripts\", \"test_performance.py\"])\\n+ 
test_file_path = os.path.sep.join(\\n+ mod_file.split(os.path.sep)[:-1] + [\"scripts\", \"external_deps\", \"test_performance.py\"]\\n+ )\\n with tempfile.TemporaryDirectory() as dirpath:\\n cmd = [\\n \"accelerate\",\\n@@ -634,7 +636,7 @@ def setUp(self):\\n \\n self.stages = [1, 2, 3]\\n self.zero3_offload_config = False\\n- self.performance_lower_bound = 0.83\\n+ self.performance_lower_bound = 0.82\\n self.peak_memory_usage_upper_bound = {\\n \"multi_gpu_fp16\": 3200,\\n \"deepspeed_stage_1_fp16\": 1600,\\n@@ -646,7 +648,7 @@ def setUp(self):\\n self.n_val = 160\\n \\n mod_file = inspect.getfile(accelerate.test_utils)\\n- self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"scripts\"])\\n+ self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"scripts\", \"external_deps\"])\\n \\n def test_performance(self):\\n self.test_file_path = os.path.join(self.test_scripts_folder, \"test_performance.py\")\\ndiff --git a/tests/fsdp/test_fsdp.py b/tests/fsdp/test_fsdp.py\\nindex 8ad088c04..628e377c3 100644\\n--- a/tests/fsdp/test_fsdp.py\\n+++ b/tests/fsdp/test_fsdp.py\\n@@ -182,7 +182,7 @@ def test_cpu_offload(self):\\n class FSDPIntegrationTest(TempDirTestCase):\\n def setUp(self):\\n super().setUp()\\n- self.performance_lower_bound = 0.83\\n+ self.performance_lower_bound = 0.82\\n self.performance_configs = [\\n \"fsdp_shard_grad_op_transformer_based_wrap\",\\n \"fsdp_full_shard_transformer_based_wrap\",\\n@@ -197,7 +197,7 @@ def setUp(self):\\n self.n_val = 160\\n \\n mod_file = inspect.getfile(accelerate.test_utils)\\n- self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"scripts\"])\\n+ self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"scripts\", \"external_deps\"])\\n \\n def test_performance(self):\\n self.test_file_path = os.path.join(self.test_scripts_folder, \"test_performance.py\")\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/602'},\n", - " 1015387439: {'diff': 'diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex 599e710f0..3930745dc 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -339,17 +339,6 @@ def simple_launcher(args):\\n mixed_precision = \"fp16\"\\n \\n current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\\n- if args.num_cpu_threads_per_process is None:\\n- local_size = get_int_from_env(\\n- [\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1\\n- )\\n- args.num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size)\\n- if args.num_cpu_threads_per_process == 0:\\n- args.num_cpu_threads_per_process = 1\\n- logger.info(\\n- f\"num_cpu_threads_per_process unset, we set it at {args.num_cpu_threads_per_process} to improve oob performance.\"\\n- )\\n-\\n current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\\n \\n process = subprocess.Popen(cmd, env=current_env)\\n@@ -447,9 +436,6 @@ def multi_gpu_launcher(args):\\n current_env[\"FSDP_BACKWARD_PREFETCH\"] = str(args.fsdp_backward_prefetch_policy)\\n if args.fsdp_state_dict_type is not None:\\n current_env[\"FSDP_STATE_DICT_TYPE\"] = str(args.fsdp_state_dict_type)\\n- if args.num_cpu_threads_per_process is None:\\n- args.num_cpu_threads_per_process = 1\\n- 
logger.info(f\"num_cpu_threads_per_process unset, we set it at {args.num_cpu_threads_per_process}.\")\\n current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\\n process = subprocess.Popen(cmd, env=current_env)\\n process.wait()\\n@@ -803,6 +789,17 @@ def launch_command(args):\\n if \"--num_processes\" in warn:\\n warned[i] = warn.replace(\"`1`\", f\"`{args.num_processes}`\")\\n \\n+ if args.num_cpu_threads_per_process is None:\\n+ local_size = get_int_from_env(\\n+ [\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1\\n+ )\\n+ args.num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size)\\n+ if args.num_cpu_threads_per_process == 0:\\n+ args.num_cpu_threads_per_process = 1\\n+ warned.append(\\n+ f\"\\\\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance\"\\n+ )\\n+\\n if any(warned):\\n message = \"The following values were not passed to `accelerate launch` and had defaults used instead:\\\\n\"\\n message += \"\\\\n\".join(warned)\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/598'},\n", - " 1015126898: {'diff': 'diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml\\nindex 0da843ec2..84c4e03f6 100644\\n--- a/docs/source/_toctree.yml\\n+++ b/docs/source/_toctree.yml\\n@@ -33,6 +33,8 @@\\n title: How to avoid CUDA Out-of-Memory\\n - local: usage_guides/sagemaker\\n title: Using Accelerate on SageMaker\\n+ - local: usage_guides/mps\\n+ title: How to use Apple Silicon M1 GPUs\\n title: How-To Guides\\n - sections:\\n - local: concept_guides/gradient_synchronization\\ndiff --git a/docs/source/usage_guides/mps.mdx b/docs/source/usage_guides/mps.mdx\\nnew file mode 100644\\nindex 000000000..43a00f104\\n--- /dev/null\\n+++ b/docs/source/usage_guides/mps.mdx\\n@@ -0,0 +1,81 @@\\n+\\n+\\n+# Accelerated PyTorch Training on Mac\\n+\\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \\n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\\n+Apple\\'s Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. \\n+This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\\n+For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\\n+\\n+### Benefits of Training and Inference using Apple M1 Chips\\n+\\n+1. Enables users to train larger networks or batch sizes locally\\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \\n+Therefore, improving end-to-end performance.\\n+3. 
Reduces costs associated with cloud-based development or the need for additional local GPUs.\\n+\\n+**Pre-requisites**: To install torch with mps support, \\n+please follow this nice medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1).\\n+\\n+\\n+## How it works out of the box\\n+\\n+On your machine(s) just run:\\n+\\n+```bash\\n+accelerate config\\n+```\\n+\\n+and answer the questions asked, specifically choose `MPS` for the query:\\n+\\n+```\\n+ Which type of machine are you using?. \\n+ ```\\n+\\n+This will generate a config file that will be used automatically to properly set \\n+the default options when doing `accelerate launch`, such as the one shown below:\\n+\\n+```bash\\n+compute_environment: LOCAL_MACHINE\\n+deepspeed_config: {}\\n+distributed_type: MPS\\n+downcast_bf16: \\'no\\'\\n+fsdp_config: {}\\n+machine_rank: 0\\n+main_process_ip: null\\n+main_process_port: null\\n+main_training_function: main\\n+mixed_precision: \\'no\\'\\n+num_machines: 1\\n+num_processes: 1\\n+use_cpu: false\\n+```\\n+\\n+After this configuration has been made, here is how you run the CV example \\n+(from the root of the repo) with MPS enabled:\\n+\\n+```bash\\n+accelerate launch /examples/cv_example.py --data_dir images\\n+```\\n+\\n+## A few caveats to be aware of\\n+\\n+1. For `nlp_example.py` the metrics are too bad when compared to CPU-only training. \\n+This means certain operations in BERT model are going wrong using mps device and this needs to be fixed by PyTorch.\\n+2. Distributed setups `gloo` and `nccl` are not working with `mps` device. \\n+This means that currently only single GPU of `mps` device type can be used.\\n+\\n+Finally, please, remember that, 🤗 `Accelerate` only integrates MPS backend, therefore if you\\n+have any problems or questions with regards to MPS backend usage, please, file an issue with [PyTorch GitHub](https://github.com/pytorch/pytorch/issues).\\n\\\\ No newline at end of file\\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex bbd8db435..9cd4b7da3 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -29,7 +29,7 @@\\n from .logging import get_logger\\n from .optimizer import AcceleratedOptimizer\\n from .scheduler import AcceleratedScheduler\\n-from .state import AcceleratorState, GradientState\\n+from .state import AcceleratorState, GradientState, parse_flag_from_env\\n from .tracking import LOGGER_TYPE_TO_CLASS, GeneralTracker, filter_trackers\\n from .utils import (\\n MODEL_NAME,\\n@@ -282,7 +282,7 @@ def __init__(\\n self.native_amp = is_torch_version(\">=\", \"1.6\")\\n if not self.native_amp:\\n raise ValueError(err.format(mode=\"fp16\", requirement=\"PyTorch >= 1.6\"))\\n- if not torch.cuda.is_available():\\n+ if not torch.cuda.is_available() and not parse_flag_from_env(\"USE_MPS_DEVICE\"):\\n raise ValueError(err.format(mode=\"fp16\", requirement=\"a GPU\"))\\n kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}\\n if self.distributed_type == DistributedType.FSDP:\\ndiff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\\nindex ba677e3eb..1ba5839e2 100644\\n--- a/src/accelerate/commands/config/cluster.py\\n+++ b/src/accelerate/commands/config/cluster.py\\n@@ -28,9 +28,9 @@\\n \\n def get_cluster_input():\\n distributed_type = _ask_field(\\n- \"Which type of machine are you using? 
([0] No distributed training, [1] multi-CPU, [2] multi-GPU, [3] TPU): \",\\n+ \"Which type of machine are you using? ([0] No distributed training, [1] multi-CPU, [2] multi-GPU, [3] TPU [4] MPS): \",\\n _convert_distributed_mode,\\n- error_message=\"Please enter 0, 1, 2 or 3.\",\\n+ error_message=\"Please enter 0, 1, 2, 3 or 4.\",\\n )\\n \\n machine_rank = 0\\ndiff --git a/src/accelerate/commands/config/config_utils.py b/src/accelerate/commands/config/config_utils.py\\nindex fe72be267..9dd1f4c99 100644\\n--- a/src/accelerate/commands/config/config_utils.py\\n+++ b/src/accelerate/commands/config/config_utils.py\\n@@ -37,7 +37,7 @@ def _convert_compute_environment(value):\\n \\n def _convert_distributed_mode(value):\\n value = int(value)\\n- return DistributedType([\"NO\", \"MULTI_CPU\", \"MULTI_GPU\", \"TPU\"][value])\\n+ return DistributedType([\"NO\", \"MULTI_CPU\", \"MULTI_GPU\", \"TPU\", \"MPS\"][value])\\n \\n \\n def _convert_sagemaker_distributed_mode(value):\\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\\nindex 599e710f0..863f903c1 100644\\n--- a/src/accelerate/commands/launch.py\\n+++ b/src/accelerate/commands/launch.py\\n@@ -63,6 +63,12 @@ def launch_command_parser(subparsers=None):\\n action=\"store_true\",\\n help=\"Whether or not this should launch a distributed GPU training.\",\\n )\\n+ parser.add_argument(\\n+ \"--use_mps_device\",\\n+ default=False,\\n+ action=\"store_true\",\\n+ help=\"Whether or not this should use MPS-enabled GPU device on MacOS machines.\",\\n+ )\\n parser.add_argument(\\n \"--use_deepspeed\",\\n default=False,\\n@@ -320,6 +326,7 @@ def simple_launcher(args):\\n \\n current_env = os.environ.copy()\\n current_env[\"USE_CPU\"] = str(args.cpu or args.use_cpu)\\n+ current_env[\"USE_MPS_DEVICE\"] = str(args.use_mps_device)\\n if args.num_machines > 1:\\n current_env[\"MASTER_ADDR\"] = args.main_process_ip\\n current_env[\"MASTER_PORT\"] = str(args.main_process_port)\\n@@ -752,11 +759,18 @@ def launch_command(args):\\n # Get the default from the config file.\\n if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu:\\n defaults = load_config_from_file(args.config_file)\\n- if not args.multi_gpu and not args.tpu and not args.use_deepspeed and not args.use_fsdp:\\n+ if (\\n+ not args.multi_gpu\\n+ and not args.tpu\\n+ and not args.use_deepspeed\\n+ and not args.use_fsdp\\n+ and not args.use_mps_device\\n+ ):\\n args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED\\n args.multi_gpu = defaults.distributed_type == DistributedType.MULTI_GPU\\n args.tpu = defaults.distributed_type == DistributedType.TPU\\n args.use_fsdp = defaults.distributed_type == DistributedType.FSDP\\n+ args.use_mps_device = defaults.distributed_type == DistributedType.MPS\\n if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE:\\n # Update args with the defaults\\n for name, attr in defaults.__dict__.items():\\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\\nindex a4f6bac2a..bd339048f 100644\\n--- a/src/accelerate/state.py\\n+++ b/src/accelerate/state.py\\n@@ -206,7 +206,26 @@ def __init__(\\n self.distributed_type = DistributedType.NO\\n self.num_processes = 1\\n self.process_index = self.local_process_index = 0\\n- self.device = torch.device(\"cuda\" if torch.cuda.is_available() and not cpu else \"cpu\")\\n+ if parse_flag_from_env(\"USE_MPS_DEVICE\") and not cpu:\\n+ if not torch.backends.mps.is_available():\\n+ if not torch.backends.mps.is_built():\\n+ raise 
AssertionError(\\n+ \"MPS not available because the current PyTorch install was not \"\\n+ \"built with MPS enabled. Please install torch version >=1.12.0 on \"\\n+ \"your Apple silicon Mac running macOS 12.3 or later with a native \"\\n+ \"version (arm64) of Python\"\\n+ )\\n+ else:\\n+ raise AssertionError(\\n+ \"MPS not available because the current MacOS version is not 12.3+ \"\\n+ \"and/or you do not have an MPS-enabled device on this machine.\"\\n+ )\\n+ else:\\n+ self.device = torch.device(\"mps\")\\n+ elif cpu or not torch.cuda.is_available():\\n+ self.device = torch.device(\"cpu\")\\n+ else:\\n+ self.device = torch.device(\"cuda\")\\n self.mixed_precision = mixed_precision\\n self.initialized = True\\n \\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\\nindex 350d95320..50c80adb1 100644\\n--- a/src/accelerate/utils/dataclasses.py\\n+++ b/src/accelerate/utils/dataclasses.py\\n@@ -123,6 +123,7 @@ class DistributedType(str, enum.Enum):\\n DEEPSPEED = \"DEEPSPEED\"\\n FSDP = \"FSDP\"\\n TPU = \"TPU\"\\n+ MPS = \"MPS\"\\n \\n \\n class SageMakerDistributedType(str, enum.Enum):\\n',\n", - " 'code_comments': [{'body': 'Can we do something similar to this [here](https://github.com/huggingface/accelerate/blob/main/src/accelerate/test_utils/testing.py#L94-L99) so that cuda tests can also be ran? As so far tests are only running on the CPU here 😄 ',\n", - " 'diff_hunk': '@@ -272,7 +272,7 @@ def __init__(\\n self.native_amp = is_torch_version(\">=\", \"1.6\")\\n if not self.native_amp:\\n raise ValueError(err.format(mode=\"fp16\", requirement=\"PyTorch >= 1.6\"))\\n- if not torch.cuda.is_available():\\n+ if not torch.cuda.is_available() and os.environ.get(\"USE_MPS_DEVICE\", \"false\") == \"false\":',\n", - " 'from_author': False},\n", - " {'body': \"There might be other truthy values we're not catching here, can se use something like `parse_flag_from_env`?\",\n", - " 'diff_hunk': '@@ -272,7 +272,7 @@ def __init__(\\n self.native_amp = is_torch_version(\">=\", \"1.6\")\\n if not self.native_amp:\\n raise ValueError(err.format(mode=\"fp16\", requirement=\"PyTorch >= 1.6\"))\\n- if not torch.cuda.is_available():\\n+ if not torch.cuda.is_available() and os.environ.get(\"USE_MPS_DEVICE\", \"false\") == \"false\":',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\n current_env[\"USE_MPS_DEVICE\"] = str(args.use_mps_device)\\r\\n```\\r\\nWe can\\'t rely on users always setting lowercased values, so we should handle any casing.',\n", - " 'diff_hunk': '@@ -320,6 +326,7 @@ def simple_launcher(args):\\n \\n current_env = os.environ.copy()\\n current_env[\"USE_CPU\"] = str(args.cpu or args.use_cpu)\\n+ current_env[\"USE_MPS_DEVICE\"] = str(args.use_mps_device).lower()',\n", - " 'from_author': False},\n", - " {'body': 'Give instructions here maybe?',\n", - " 'diff_hunk': '@@ -208,7 +208,24 @@ def __init__(\\n self.distributed_type = DistributedType.NO\\n self.num_processes = 1\\n self.process_index = self.local_process_index = 0\\n- self.device = torch.device(\"cuda\" if torch.cuda.is_available() and not cpu else \"cpu\")\\n+ if os.environ.get(\"USE_MPS_DEVICE\", \"false\") == \"true\" and not cpu:\\n+ if not torch.backends.mps.is_available():\\n+ if not torch.backends.mps.is_built():\\n+ raise AssertionError(\\n+ \"MPS not available because the current PyTorch install was not \"\\n+ \"built with MPS enabled.\"',\n", - " 'from_author': False},\n", - " {'body': 'Hello, `use_mps_device` is a boolean argument using argparse action, so I think this 
should be fine.',\n", - " 'diff_hunk': '@@ -320,6 +326,7 @@ def simple_launcher(args):\\n \\n current_env = os.environ.copy()\\n current_env[\"USE_CPU\"] = str(args.cpu or args.use_cpu)\\n+ current_env[\"USE_MPS_DEVICE\"] = str(args.use_mps_device).lower()',\n", - " 'from_author': True},\n", - " {'body': 'Done.',\n", - " 'diff_hunk': '@@ -208,7 +208,24 @@ def __init__(\\n self.distributed_type = DistributedType.NO\\n self.num_processes = 1\\n self.process_index = self.local_process_index = 0\\n- self.device = torch.device(\"cuda\" if torch.cuda.is_available() and not cpu else \"cpu\")\\n+ if os.environ.get(\"USE_MPS_DEVICE\", \"false\") == \"true\" and not cpu:\\n+ if not torch.backends.mps.is_available():\\n+ if not torch.backends.mps.is_built():\\n+ raise AssertionError(\\n+ \"MPS not available because the current PyTorch install was not \"\\n+ \"built with MPS enabled.\"',\n", - " 'from_author': True},\n", - " {'body': 'Done.',\n", - " 'diff_hunk': '@@ -320,6 +326,7 @@ def simple_launcher(args):\\n \\n current_env = os.environ.copy()\\n current_env[\"USE_CPU\"] = str(args.cpu or args.use_cpu)\\n+ current_env[\"USE_MPS_DEVICE\"] = str(args.use_mps_device).lower()',\n", - " 'from_author': True},\n", - " {'body': 'Done.',\n", - " 'diff_hunk': '@@ -272,7 +272,7 @@ def __init__(\\n self.native_amp = is_torch_version(\">=\", \"1.6\")\\n if not self.native_amp:\\n raise ValueError(err.format(mode=\"fp16\", requirement=\"PyTorch >= 1.6\"))\\n- if not torch.cuda.is_available():\\n+ if not torch.cuda.is_available() and os.environ.get(\"USE_MPS_DEVICE\", \"false\") == \"false\":',\n", - " 'from_author': True},\n", - " {'body': \"Hello, I didn't understand. Let's take this offline to better understand.\",\n", - " 'diff_hunk': '@@ -272,7 +272,7 @@ def __init__(\\n self.native_amp = is_torch_version(\">=\", \"1.6\")\\n if not self.native_amp:\\n raise ValueError(err.format(mode=\"fp16\", requirement=\"PyTorch >= 1.6\"))\\n- if not torch.cuda.is_available():\\n+ if not torch.cuda.is_available() and os.environ.get(\"USE_MPS_DEVICE\", \"false\") == \"false\":',\n", - " 'from_author': True},\n", - " {'body': 'Specify what specific question related to MPS needs to be answered here :) ',\n", - " 'diff_hunk': '@@ -0,0 +1,93 @@\\n+\\n+\\n+# Accelerated PyTorch Training on Mac\\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \\n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\\n+Apple\\'s Metal Performance Shaders (MPS) as a backend for PyTorch enables this. \\n+New device `mps` maps computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\\n+For more information please refer Official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\\n+\\n+### Benefits of Training and Inference using Apple M1 Chips\\n+1. Enables users to train larger networks or batch sizes locally\\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \\n+Therefore, improving end-to-end performance.\\n+3. 
Reduces costs associated with cloud-based development or the need for additional local GPUs.\\n+\\n+### Pre-requisites: Installing torch with mps support\\n+Following this great medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1),\\n+below are the steps to make sure torch is installed with `mps` support\\n+```\\n+# installing torch with m1 support on mac\\n+# install latest python from https://www.python.org/downloads/release/python-3106/\\n+# check the platform\\n+import platform\\n+platform.platform()\\n+\\'macOS-12.5-arm64-arm-64bit\\' \\n+# This is compatible as the macOS version is above 12.3 and it is the ARM64 native version of Python\\n+# install torch 1.12 via the below command\\n+# pip3 install torch torchvision torchaudio\\n+# test the `mps` device support\\n+import torch\\n+torch.has_mps\\n+True\\n+a = torch.Tensor([10,11])\\n+a.to(\"mps\")\\n+/Users/mac/ml/lib/python3.10/site-packages/torch/_tensor_str.py:103: UserWarning: The operator \\'aten::bitwise_and.Tensor_out\\' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. (Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/mps/MPSFallback.mm:11.)\\n+ nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))\\n+tensor([10.0000, 11.0000], device=\\'mps:0\\')\\n+```\\n+\\n+## How it works out of the box\\n+On your machine(s) just run:\\n+\\n+```bash\\n+accelerate config\\n+```\\n+\\n+and answer the questions asked. This will generate a config file that will be used automatically to properly set the\\n+default options when doing',\n", - " 'from_author': False},\n", - " {'body': 'Resolving based on Sylvains note about the tests',\n", - " 'diff_hunk': '@@ -272,7 +272,7 @@ def __init__(\\n self.native_amp = is_torch_version(\">=\", \"1.6\")\\n if not self.native_amp:\\n raise ValueError(err.format(mode=\"fp16\", requirement=\"PyTorch >= 1.6\"))\\n- if not torch.cuda.is_available():\\n+ if not torch.cuda.is_available() and os.environ.get(\"USE_MPS_DEVICE\", \"false\") == \"false\":',\n", - " 'from_author': False},\n", - " {'body': \"Add a line under each header, I believe this is why the URLs aren't showing correctly and why everything's formatting seems to be broken: /static-proxy?url=https%3A%2F%2Fmoon-ci-docs.huggingface.co%2Fdocs%2Faccelerate%2Fpr_596%2Fen%2Fusage_guides%2Fmps%23accelerated-pytorch-training-on-mac%5C",\n", - " 'diff_hunk': '@@ -0,0 +1,93 @@\\n+\\n+\\n+# Accelerated PyTorch Training on Mac\\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. ',\n", - " 'from_author': False},\n", - " {'body': '```suggestion\\r\\nApple\\'s Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\\r\\n```',\n", - " 'diff_hunk': '@@ -0,0 +1,93 @@\\n+\\n+\\n+# Accelerated PyTorch Training on Mac\\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. 
\\n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\\n+Apple\\'s Metal Performance Shaders (MPS) as a backend for PyTorch enables this. \\n+New device `mps` maps computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.',\n", - " 'from_author': False},\n", - " {'body': 'Done.',\n", - " 'diff_hunk': '@@ -0,0 +1,93 @@\\n+\\n+\\n+# Accelerated PyTorch Training on Mac\\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. ',\n", - " 'from_author': True},\n", - " {'body': 'Done\\r\\n',\n", - " 'diff_hunk': '@@ -0,0 +1,93 @@\\n+\\n+\\n+# Accelerated PyTorch Training on Mac\\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \\n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\\n+Apple\\'s Metal Performance Shaders (MPS) as a backend for PyTorch enables this. \\n+New device `mps` maps computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\\n+For more information please refer Official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\\n+\\n+### Benefits of Training and Inference using Apple M1 Chips\\n+1. Enables users to train larger networks or batch sizes locally\\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \\n+Therefore, improving end-to-end performance.\\n+3. Reduces costs associated with cloud-based development or the need for additional local GPUs.\\n+\\n+### Pre-requisites: Installing torch with mps support\\n+Following this great medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1),\\n+below are the steps to make sure torch is installed with `mps` support\\n+```\\n+# installing torch with m1 support on mac\\n+# install latest python from https://www.python.org/downloads/release/python-3106/\\n+# check the platform\\n+import platform\\n+platform.platform()\\n+\\'macOS-12.5-arm64-arm-64bit\\' \\n+# This is compatible as the macOS version is above 12.3 and it is the ARM64 native version of Python\\n+# install torch 1.12 via the below command\\n+# pip3 install torch torchvision torchaudio\\n+# test the `mps` device support\\n+import torch\\n+torch.has_mps\\n+True\\n+a = torch.Tensor([10,11])\\n+a.to(\"mps\")\\n+/Users/mac/ml/lib/python3.10/site-packages/torch/_tensor_str.py:103: UserWarning: The operator \\'aten::bitwise_and.Tensor_out\\' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. (Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/mps/MPSFallback.mm:11.)\\n+ nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))\\n+tensor([10.0000, 11.0000], device=\\'mps:0\\')\\n+```\\n+\\n+## How it works out of the box\\n+On your machine(s) just run:\\n+\\n+```bash\\n+accelerate config\\n+```\\n+\\n+and answer the questions asked. 
This will generate a config file that will be used automatically to properly set the\\n+default options when doing',\n", - " 'from_author': True},\n", - " {'body': 'We can break this up a bit to make it more digestible/readable + syntax highlighting\\r\\n\\r\\n```suggestion\\r\\n```python\\r\\n# installing torch with m1 support on mac\\r\\n# install latest python from https://www.python.org/downloads/release/python-3106/\\r\\n# check the platform\\r\\nimport platform\\r\\nplatform.platform()\\r\\n```\\r\\n```python out\\r\\n\\'macOS-12.5-arm64-arm-64bit\\' \\r\\n```\\r\\n```python\\r\\n# This is compatible as the macOS version is above 12.3 and it is the ARM64 native version of Python\\r\\n# install torch 1.12 via the below command\\r\\n# pip3 install torch torchvision torchaudio\\r\\n# test the `mps` device support\\r\\nimport torch\\r\\ntorch.has_mps\\r\\n```\\r\\n```python out\\r\\nTrue\\r\\n```\\r\\n```python\\r\\na = torch.Tensor([10,11])\\r\\na.to(\"mps\")\\r\\n```\\r\\n```python out\\r\\n/Users/mac/ml/lib/python3.10/site-packages/torch/_tensor_str.py:103: UserWarning: The operator \\'aten::bitwise_and.Tensor_out\\' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. (Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/mps/MPSFallback.mm:11.)\\r\\n nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))\\r\\ntensor([10.0000, 11.0000], device=\\'mps:0\\')\\r\\n```\\r\\n```',\n", - " 'diff_hunk': '@@ -0,0 +1,100 @@\\n+\\n+\\n+# Accelerated PyTorch Training on Mac\\n+\\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \\n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\\n+Apple\\'s Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. \\n+This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\\n+For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\\n+\\n+### Benefits of Training and Inference using Apple M1 Chips\\n+\\n+1. Enables users to train larger networks or batch sizes locally\\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \\n+Therefore, improving end-to-end performance.\\n+3. 
Reduces costs associated with cloud-based development or the need for additional local GPUs.\\n+\\n+### Pre-requisites: Installing torch with mps support\\n+\\n+Following this great medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1),\\n+below are the steps to make sure torch is installed with `mps` support.\\n+\\n+```\\n+# installing torch with m1 support on mac\\n+# install latest python from https://www.python.org/downloads/release/python-3106/\\n+# check the platform\\n+import platform\\n+platform.platform()\\n+\\'macOS-12.5-arm64-arm-64bit\\' \\n+# This is compatible as the macOS version is above 12.3 and it is the ARM64 native version of Python\\n+# install torch 1.12 via the below command\\n+# pip3 install torch torchvision torchaudio\\n+# test the `mps` device support\\n+import torch\\n+torch.has_mps\\n+True\\n+a = torch.Tensor([10,11])\\n+a.to(\"mps\")\\n+/Users/mac/ml/lib/python3.10/site-packages/torch/_tensor_str.py:103: UserWarning: The operator \\'aten::bitwise_and.Tensor_out\\' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. (Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/mps/MPSFallback.mm:11.)\\n+ nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))\\n+tensor([10.0000, 11.0000], device=\\'mps:0\\')\\n+```',\n", - " 'from_author': False},\n", - " {'body': \"I can't make a suggestion for this one, but I recommend something like this:\\r\\n\\r\\nand answer the questions asked, specifically choose `MPS` for the query:\\r\\n```\\r\\nWhich type of machine are you using?\\r\\n```\\r\\nThis will generate a config file that will be used automatically to properly set the\\r\\ndefault options when doing\",\n", - " 'diff_hunk': '@@ -0,0 +1,100 @@\\n+\\n+\\n+# Accelerated PyTorch Training on Mac\\n+\\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \\n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\\n+Apple\\'s Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. \\n+This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\\n+For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\\n+\\n+### Benefits of Training and Inference using Apple M1 Chips\\n+\\n+1. Enables users to train larger networks or batch sizes locally\\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \\n+Therefore, improving end-to-end performance.\\n+3. 
Reduces costs associated with cloud-based development or the need for additional local GPUs.\\n+\\n+### Pre-requisites: Installing torch with mps support\\n+\\n+Following this great medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1),\\n+below are the steps to make sure torch is installed with `mps` support.\\n+\\n+```\\n+# installing torch with m1 support on mac\\n+# install latest python from https://www.python.org/downloads/release/python-3106/\\n+# check the platform\\n+import platform\\n+platform.platform()\\n+\\'macOS-12.5-arm64-arm-64bit\\' \\n+# This is compatible as the macOS version is above 12.3 and it is the ARM64 native version of Python\\n+# install torch 1.12 via the below command\\n+# pip3 install torch torchvision torchaudio\\n+# test the `mps` device support\\n+import torch\\n+torch.has_mps\\n+True\\n+a = torch.Tensor([10,11])\\n+a.to(\"mps\")\\n+/Users/mac/ml/lib/python3.10/site-packages/torch/_tensor_str.py:103: UserWarning: The operator \\'aten::bitwise_and.Tensor_out\\' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. (Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/mps/MPSFallback.mm:11.)\\n+ nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))\\n+tensor([10.0000, 11.0000], device=\\'mps:0\\')\\n+```\\n+\\n+## How it works out of the box\\n+\\n+On your machine(s) just run:\\n+\\n+```bash\\n+accelerate config\\n+```\\n+\\n+and answer the questions asked, specifically choose `MPS` for the query `Which type of machine are you using?` . \\n+This will generate a config file that will be used automatically to properly set the\\n+default options when doing',\n", - " 'from_author': False},\n", - " {'body': \"Move this section to after you've ran `accelerate config` and state that this is what the generated configuration looks like\",\n", - " 'diff_hunk': '@@ -0,0 +1,100 @@\\n+\\n+\\n+# Accelerated PyTorch Training on Mac\\n+\\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \\n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\\n+Apple\\'s Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. \\n+This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\\n+For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\\n+\\n+### Benefits of Training and Inference using Apple M1 Chips\\n+\\n+1. Enables users to train larger networks or batch sizes locally\\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \\n+Therefore, improving end-to-end performance.\\n+3. 
Reduces costs associated with cloud-based development or the need for additional local GPUs.\\n+\\n+### Pre-requisites: Installing torch with mps support\\n+\\n+Following this great medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1),\\n+below are the steps to make sure torch is installed with `mps` support.\\n+\\n+```\\n+# installing torch with m1 support on mac\\n+# install latest python from https://www.python.org/downloads/release/python-3106/\\n+# check the platform\\n+import platform\\n+platform.platform()\\n+\\'macOS-12.5-arm64-arm-64bit\\' \\n+# This is compatible as the macOS version is above 12.3 and it is the ARM64 native version of Python\\n+# install torch 1.12 via the below command\\n+# pip3 install torch torchvision torchaudio\\n+# test the `mps` device support\\n+import torch\\n+torch.has_mps\\n+True\\n+a = torch.Tensor([10,11])\\n+a.to(\"mps\")\\n+/Users/mac/ml/lib/python3.10/site-packages/torch/_tensor_str.py:103: UserWarning: The operator \\'aten::bitwise_and.Tensor_out\\' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. (Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/mps/MPSFallback.mm:11.)\\n+ nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))\\n+tensor([10.0000, 11.0000], device=\\'mps:0\\')\\n+```\\n+\\n+## How it works out of the box\\n+\\n+On your machine(s) just run:\\n+\\n+```bash\\n+accelerate config\\n+```\\n+\\n+and answer the questions asked, specifically choose `MPS` for the query `Which type of machine are you using?` . \\n+This will generate a config file that will be used automatically to properly set the\\n+default options when doing\\n+\\n+```bash\\n+accelerate launch my_script.py --args_to_my_script\\n+```\\n+\\n+For instance, here is how you would run the CV example (from the root of the repo) with MPS enabled:\\n+```bash\\n+compute_environment: LOCAL_MACHINE\\n+deepspeed_config: {}\\n+distributed_type: MPS\\n+downcast_bf16: \\'no\\'\\n+fsdp_config: {}\\n+machine_rank: 0\\n+main_process_ip: null\\n+main_process_port: null\\n+main_training_function: main\\n+mixed_precision: \\'no\\'\\n+num_machines: 1\\n+num_processes: 1\\n+use_cpu: false\\n+```',\n", - " 'from_author': False},\n", - " {'body': 'I feel the above block is fine as it might be too distracting to have multiple blocks which can break the flow when the user is reading ',\n", - " 'diff_hunk': '@@ -0,0 +1,100 @@\\n+\\n+\\n+# Accelerated PyTorch Training on Mac\\n+\\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \\n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\\n+Apple\\'s Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. \\n+This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\\n+For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\\n+\\n+### Benefits of Training and Inference using Apple M1 Chips\\n+\\n+1. 
Enables users to train larger networks or batch sizes locally\\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \\n+Therefore, improving end-to-end performance.\\n+3. Reduces costs associated with cloud-based development or the need for additional local GPUs.\\n+\\n+### Pre-requisites: Installing torch with mps support\\n+\\n+Following this great medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1),\\n+below are the steps to make sure torch is installed with `mps` support.\\n+\\n+```\\n+# installing torch with m1 support on mac\\n+# install latest python from https://www.python.org/downloads/release/python-3106/\\n+# check the platform\\n+import platform\\n+platform.platform()\\n+\\'macOS-12.5-arm64-arm-64bit\\' \\n+# This is compatible as the macOS version is above 12.3 and it is the ARM64 native version of Python\\n+# install torch 1.12 via the below command\\n+# pip3 install torch torchvision torchaudio\\n+# test the `mps` device support\\n+import torch\\n+torch.has_mps\\n+True\\n+a = torch.Tensor([10,11])\\n+a.to(\"mps\")\\n+/Users/mac/ml/lib/python3.10/site-packages/torch/_tensor_str.py:103: UserWarning: The operator \\'aten::bitwise_and.Tensor_out\\' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. (Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/mps/MPSFallback.mm:11.)\\n+ nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))\\n+tensor([10.0000, 11.0000], device=\\'mps:0\\')\\n+```',\n", - " 'from_author': True},\n", - " {'body': 'But then it would be breaking the flow of commands. I have followed generic commands `accelerate config` and `accelerate launch script.py args` with specific example so that user can quickly get the gist. ',\n", - " 'diff_hunk': '@@ -0,0 +1,100 @@\\n+\\n+\\n+# Accelerated PyTorch Training on Mac\\n+\\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \\n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\\n+Apple\\'s Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. \\n+This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\\n+For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\\n+\\n+### Benefits of Training and Inference using Apple M1 Chips\\n+\\n+1. Enables users to train larger networks or batch sizes locally\\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \\n+Therefore, improving end-to-end performance.\\n+3. 
Reduces costs associated with cloud-based development or the need for additional local GPUs.\\n+\\n+### Pre-requisites: Installing torch with mps support\\n+\\n+Following this great medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1),\\n+below are the steps to make sure torch is installed with `mps` support.\\n+\\n+```\\n+# installing torch with m1 support on mac\\n+# install latest python from https://www.python.org/downloads/release/python-3106/\\n+# check the platform\\n+import platform\\n+platform.platform()\\n+\\'macOS-12.5-arm64-arm-64bit\\' \\n+# This is compatible as the macOS version is above 12.3 and it is the ARM64 native version of Python\\n+# install torch 1.12 via the below command\\n+# pip3 install torch torchvision torchaudio\\n+# test the `mps` device support\\n+import torch\\n+torch.has_mps\\n+True\\n+a = torch.Tensor([10,11])\\n+a.to(\"mps\")\\n+/Users/mac/ml/lib/python3.10/site-packages/torch/_tensor_str.py:103: UserWarning: The operator \\'aten::bitwise_and.Tensor_out\\' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. (Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/mps/MPSFallback.mm:11.)\\n+ nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))\\n+tensor([10.0000, 11.0000], device=\\'mps:0\\')\\n+```\\n+\\n+## How it works out of the box\\n+\\n+On your machine(s) just run:\\n+\\n+```bash\\n+accelerate config\\n+```\\n+\\n+and answer the questions asked, specifically choose `MPS` for the query `Which type of machine are you using?` . \\n+This will generate a config file that will be used automatically to properly set the\\n+default options when doing\\n+\\n+```bash\\n+accelerate launch my_script.py --args_to_my_script\\n+```\\n+\\n+For instance, here is how you would run the CV example (from the root of the repo) with MPS enabled:\\n+```bash\\n+compute_environment: LOCAL_MACHINE\\n+deepspeed_config: {}\\n+distributed_type: MPS\\n+downcast_bf16: \\'no\\'\\n+fsdp_config: {}\\n+machine_rank: 0\\n+main_process_ip: null\\n+main_process_port: null\\n+main_training_function: main\\n+mixed_precision: \\'no\\'\\n+num_machines: 1\\n+num_processes: 1\\n+use_cpu: false\\n+```',\n", - " 'from_author': True},\n", - " {'body': \"If you're using comments inside a code-block in the documentation, you are doing something wrong; you can put the comments as sentences **then** enter a code-block. Also you should use the doctest syntax to highlight the inputs and outputs, as it's not clear otherwise.\",\n", - " 'diff_hunk': '@@ -0,0 +1,100 @@\\n+\\n+\\n+# Accelerated PyTorch Training on Mac\\n+\\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \\n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\\n+Apple\\'s Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. 
\\n+This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\\n+For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\\n+\\n+### Benefits of Training and Inference using Apple M1 Chips\\n+\\n+1. Enables users to train larger networks or batch sizes locally\\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \\n+Therefore, improving end-to-end performance.\\n+3. Reduces costs associated with cloud-based development or the need for additional local GPUs.\\n+\\n+### Pre-requisites: Installing torch with mps support\\n+\\n+Following this great medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1),\\n+below are the steps to make sure torch is installed with `mps` support.\\n+\\n+```\\n+# installing torch with m1 support on mac\\n+# install latest python from https://www.python.org/downloads/release/python-3106/\\n+# check the platform\\n+import platform\\n+platform.platform()\\n+\\'macOS-12.5-arm64-arm-64bit\\' \\n+# This is compatible as the macOS version is above 12.3 and it is the ARM64 native version of Python\\n+# install torch 1.12 via the below command\\n+# pip3 install torch torchvision torchaudio\\n+# test the `mps` device support\\n+import torch\\n+torch.has_mps\\n+True\\n+a = torch.Tensor([10,11])\\n+a.to(\"mps\")\\n+/Users/mac/ml/lib/python3.10/site-packages/torch/_tensor_str.py:103: UserWarning: The operator \\'aten::bitwise_and.Tensor_out\\' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. (Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/mps/MPSFallback.mm:11.)\\n+ nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))\\n+tensor([10.0000, 11.0000], device=\\'mps:0\\')\\n+```',\n", - " 'from_author': False},\n", - " {'body': \"I'd probably change it to be:\\r\\n\\r\\n```\\r\\nThis will generate a config file that will be used automatically to properly set the default options when doing `accelerate launch`, such as the one shown below:\\r\\n```\\r\\nAnd just don't show the full `accelerate launch` snippet, since here we want to focus on the config not the launch.\\r\\n\\r\\nThe next part would then say:\\r\\n```\\r\\nAfter this configuration has been made, here is how you run the CV example (from the root of the repo) with MPS enabled:\\r\\n```\\r\\n\\r\\nThis way each part is focused on what matters to that section\",\n", - " 'diff_hunk': '@@ -0,0 +1,100 @@\\n+\\n+\\n+# Accelerated PyTorch Training on Mac\\n+\\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \\n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\\n+Apple\\'s Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. 
\\n+This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\\n+For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\\n+\\n+### Benefits of Training and Inference using Apple M1 Chips\\n+\\n+1. Enables users to train larger networks or batch sizes locally\\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \\n+Therefore, improving end-to-end performance.\\n+3. Reduces costs associated with cloud-based development or the need for additional local GPUs.\\n+\\n+### Pre-requisites: Installing torch with mps support\\n+\\n+Following this great medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1),\\n+below are the steps to make sure torch is installed with `mps` support.\\n+\\n+```\\n+# installing torch with m1 support on mac\\n+# install latest python from https://www.python.org/downloads/release/python-3106/\\n+# check the platform\\n+import platform\\n+platform.platform()\\n+\\'macOS-12.5-arm64-arm-64bit\\' \\n+# This is compatible as the macOS version is above 12.3 and it is the ARM64 native version of Python\\n+# install torch 1.12 via the below command\\n+# pip3 install torch torchvision torchaudio\\n+# test the `mps` device support\\n+import torch\\n+torch.has_mps\\n+True\\n+a = torch.Tensor([10,11])\\n+a.to(\"mps\")\\n+/Users/mac/ml/lib/python3.10/site-packages/torch/_tensor_str.py:103: UserWarning: The operator \\'aten::bitwise_and.Tensor_out\\' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. (Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/mps/MPSFallback.mm:11.)\\n+ nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))\\n+tensor([10.0000, 11.0000], device=\\'mps:0\\')\\n+```\\n+\\n+## How it works out of the box\\n+\\n+On your machine(s) just run:\\n+\\n+```bash\\n+accelerate config\\n+```\\n+\\n+and answer the questions asked, specifically choose `MPS` for the query `Which type of machine are you using?` . \\n+This will generate a config file that will be used automatically to properly set the\\n+default options when doing\\n+\\n+```bash\\n+accelerate launch my_script.py --args_to_my_script\\n+```\\n+\\n+For instance, here is how you would run the CV example (from the root of the repo) with MPS enabled:\\n+```bash\\n+compute_environment: LOCAL_MACHINE\\n+deepspeed_config: {}\\n+distributed_type: MPS\\n+downcast_bf16: \\'no\\'\\n+fsdp_config: {}\\n+machine_rank: 0\\n+main_process_ip: null\\n+main_process_port: null\\n+main_training_function: main\\n+mixed_precision: \\'no\\'\\n+num_machines: 1\\n+num_processes: 1\\n+use_cpu: false\\n+```',\n", - " 'from_author': False},\n", - " {'body': 'Oh, okay, then maybe I can just leave it at the link to the medium blogpost instead of summarizing it this way. ',\n", - " 'diff_hunk': '@@ -0,0 +1,100 @@\\n+\\n+\\n+# Accelerated PyTorch Training on Mac\\n+\\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. 
\\n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\\n+Apple\\'s Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. \\n+This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\\n+For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\\n+\\n+### Benefits of Training and Inference using Apple M1 Chips\\n+\\n+1. Enables users to train larger networks or batch sizes locally\\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \\n+Therefore, improving end-to-end performance.\\n+3. Reduces costs associated with cloud-based development or the need for additional local GPUs.\\n+\\n+### Pre-requisites: Installing torch with mps support\\n+\\n+Following this great medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1),\\n+below are the steps to make sure torch is installed with `mps` support.\\n+\\n+```\\n+# installing torch with m1 support on mac\\n+# install latest python from https://www.python.org/downloads/release/python-3106/\\n+# check the platform\\n+import platform\\n+platform.platform()\\n+\\'macOS-12.5-arm64-arm-64bit\\' \\n+# This is compatible as the macOS version is above 12.3 and it is the ARM64 native version of Python\\n+# install torch 1.12 via the below command\\n+# pip3 install torch torchvision torchaudio\\n+# test the `mps` device support\\n+import torch\\n+torch.has_mps\\n+True\\n+a = torch.Tensor([10,11])\\n+a.to(\"mps\")\\n+/Users/mac/ml/lib/python3.10/site-packages/torch/_tensor_str.py:103: UserWarning: The operator \\'aten::bitwise_and.Tensor_out\\' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. (Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/mps/MPSFallback.mm:11.)\\n+ nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))\\n+tensor([10.0000, 11.0000], device=\\'mps:0\\')\\n+```',\n", - " 'from_author': True},\n", - " {'body': 'Done. Removed this entire block and just left it with a link to the medium blogpost explaining this in detail. ',\n", - " 'diff_hunk': '@@ -0,0 +1,100 @@\\n+\\n+\\n+# Accelerated PyTorch Training on Mac\\n+\\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \\n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\\n+Apple\\'s Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. \\n+This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\\n+For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\\n+\\n+### Benefits of Training and Inference using Apple M1 Chips\\n+\\n+1. 
Enables users to train larger networks or batch sizes locally\\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \\n+Therefore, improving end-to-end performance.\\n+3. Reduces costs associated with cloud-based development or the need for additional local GPUs.\\n+\\n+### Pre-requisites: Installing torch with mps support\\n+\\n+Following this great medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1),\\n+below are the steps to make sure torch is installed with `mps` support.\\n+\\n+```\\n+# installing torch with m1 support on mac\\n+# install latest python from https://www.python.org/downloads/release/python-3106/\\n+# check the platform\\n+import platform\\n+platform.platform()\\n+\\'macOS-12.5-arm64-arm-64bit\\' \\n+# This is compatible as the macOS version is above 12.3 and it is the ARM64 native version of Python\\n+# install torch 1.12 via the below command\\n+# pip3 install torch torchvision torchaudio\\n+# test the `mps` device support\\n+import torch\\n+torch.has_mps\\n+True\\n+a = torch.Tensor([10,11])\\n+a.to(\"mps\")\\n+/Users/mac/ml/lib/python3.10/site-packages/torch/_tensor_str.py:103: UserWarning: The operator \\'aten::bitwise_and.Tensor_out\\' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. (Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/mps/MPSFallback.mm:11.)\\n+ nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))\\n+tensor([10.0000, 11.0000], device=\\'mps:0\\')\\n+```',\n", - " 'from_author': True},\n", - " {'body': 'This should be rewritten or removed now that the PyTorch issue has been solved (in nightly at least), no?',\n", - " 'diff_hunk': '@@ -0,0 +1,81 @@\\n+\\n+\\n+# Accelerated PyTorch Training on Mac\\n+\\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \\n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\\n+Apple\\'s Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. \\n+This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\\n+For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\\n+\\n+### Benefits of Training and Inference using Apple M1 Chips\\n+\\n+1. Enables users to train larger networks or batch sizes locally\\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \\n+Therefore, improving end-to-end performance.\\n+3. 
Reduces costs associated with cloud-based development or the need for additional local GPUs.\\n+\\n+**Pre-requisites**: To install torch with mps support, \\n+please follow this nice medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1).\\n+\\n+\\n+## How it works out of the box\\n+\\n+On your machine(s) just run:\\n+\\n+```bash\\n+accelerate config\\n+```\\n+\\n+and answer the questions asked, specifically choose `MPS` for the query:\\n+\\n+```\\n+ Which type of machine are you using?. \\n+ ```\\n+\\n+This will generate a config file that will be used automatically to properly set \\n+the default options when doing `accelerate launch`, such as the one shown below:\\n+\\n+```bash\\n+compute_environment: LOCAL_MACHINE\\n+deepspeed_config: {}\\n+distributed_type: MPS\\n+downcast_bf16: \\'no\\'\\n+fsdp_config: {}\\n+machine_rank: 0\\n+main_process_ip: null\\n+main_process_port: null\\n+main_training_function: main\\n+mixed_precision: \\'no\\'\\n+num_machines: 1\\n+num_processes: 1\\n+use_cpu: false\\n+```\\n+\\n+After this configuration has been made, here is how you run the CV example \\n+(from the root of the repo) with MPS enabled:\\n+\\n+```bash\\n+accelerate launch /examples/cv_example.py --data_dir images\\n+```\\n+\\n+## A few caveats to be aware of\\n+\\n+1. For `nlp_example.py` the metrics are too bad when compared to CPU-only training. \\n+This means certain operations in BERT model are going wrong using mps device and this needs to be fixed by PyTorch.',\n", - " 'from_author': False},\n", - " {'body': 'Done. Please refer this PR #629 ',\n", - " 'diff_hunk': '@@ -0,0 +1,81 @@\\n+\\n+\\n+# Accelerated PyTorch Training on Mac\\n+\\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \\n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\\n+Apple\\'s Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. \\n+This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\\n+For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\\n+\\n+### Benefits of Training and Inference using Apple M1 Chips\\n+\\n+1. Enables users to train larger networks or batch sizes locally\\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \\n+Therefore, improving end-to-end performance.\\n+3. Reduces costs associated with cloud-based development or the need for additional local GPUs.\\n+\\n+**Pre-requisites**: To install torch with mps support, \\n+please follow this nice medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1).\\n+\\n+\\n+## How it works out of the box\\n+\\n+On your machine(s) just run:\\n+\\n+```bash\\n+accelerate config\\n+```\\n+\\n+and answer the questions asked, specifically choose `MPS` for the query:\\n+\\n+```\\n+ Which type of machine are you using?. 
\\n+ ```\\n+\\n+This will generate a config file that will be used automatically to properly set \\n+the default options when doing `accelerate launch`, such as the one shown below:\\n+\\n+```bash\\n+compute_environment: LOCAL_MACHINE\\n+deepspeed_config: {}\\n+distributed_type: MPS\\n+downcast_bf16: \\'no\\'\\n+fsdp_config: {}\\n+machine_rank: 0\\n+main_process_ip: null\\n+main_process_port: null\\n+main_training_function: main\\n+mixed_precision: \\'no\\'\\n+num_machines: 1\\n+num_processes: 1\\n+use_cpu: false\\n+```\\n+\\n+After this configuration has been made, here is how you run the CV example \\n+(from the root of the repo) with MPS enabled:\\n+\\n+```bash\\n+accelerate launch /examples/cv_example.py --data_dir images\\n+```\\n+\\n+## A few caveats to be aware of\\n+\\n+1. For `nlp_example.py` the metrics are too bad when compared to CPU-only training. \\n+This means certain operations in BERT model are going wrong using mps device and this needs to be fixed by PyTorch.',\n", - " 'from_author': True}],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False},\n", - " {'body': \"`notebook_launcher` doesn't seem to be modified in order to utilize this change that supports MPS device.\",\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/596'},\n", - " 1015083703: {'diff': 'diff --git a/setup.py b/setup.py\\nindex a59d1ecaf..ec015f276 100644\\n--- a/setup.py\\n+++ b/setup.py\\n@@ -19,7 +19,7 @@\\n extras[\"quality\"] = [\"black ~= 22.0\", \"isort >= 5.5.4\", \"flake8 >= 3.8.3\", \"hf-doc-builder >= 0.3.0\"]\\n extras[\"docs\"] = []\\n extras[\"test_prod\"] = [\"pytest\", \"pytest-xdist\", \"pytest-subtests\", \"parameterized\"]\\n-extras[\"test_dev\"] = [\"datasets\", \"evaluate\", \"transformers\", \"scipy\", \"sklearn\", \"deepspeed\", \"tqdm\"]\\n+extras[\"test_dev\"] = [\"datasets\", \"evaluate\", \"transformers\", \"scipy\", \"sklearn\", \"deepspeed<0.7.0\", \"tqdm\"]\\n extras[\"testing\"] = extras[\"test_prod\"] + extras[\"test_dev\"]\\n \\n extras[\"test_trackers\"] = [\"wandb\", \"comet-ml\", \"tensorboard\"]\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/595'},\n", - " 1015050329: {'diff': 'diff --git a/docs/source/usage_guides/tracking.mdx b/docs/source/usage_guides/tracking.mdx\\nindex 2fb68c0f2..7c61b21e9 100644\\n--- a/docs/source/usage_guides/tracking.mdx\\n+++ b/docs/source/usage_guides/tracking.mdx\\n@@ -13,7 +13,7 @@ specific language governing permissions and limitations under the License.\\n # Tracking\\n \\n There are a large number of experiment tracking API\\'s available, however getting them all to work with in a multi-processing environment can oftentimes be complex.\\n-🤗 Accelerate provides a general tracking API that can be used to log useful items during your script through [`~Accelerator.log`]\\n+🤗 Accelerate provides a general tracking API that can be used to log useful items during your script through [`Accelerator.log`]\\n \\n ## Integrated Trackers\\n \\n@@ -33,19 +33,19 @@ accelerator = Accelerator(log_with=\"wandb\")\\n accelerator = Accelerator(log_with=[\"wandb\", LoggerType.TENSORBOARD])\\n ```\\n \\n-At the start of your experiment [`~Accelerator.init_trackers`] should be used to setup your project, and 
potentially add any experiment hyperparameters to be logged:\\n+At the start of your experiment [`Accelerator.init_trackers`] should be used to setup your project, and potentially add any experiment hyperparameters to be logged:\\n ```python\\n hps = {\"num_iterations\": 5, \"learning_rate\": 1e-2}\\n accelerator.init_trackers(\"my_project\", config=hps)\\n ```\\n \\n-When you are ready to log any data, [`~Accelerator.log`] should be used.\\n+When you are ready to log any data, [`Accelerator.log`] should be used.\\n A `step` can also be passed in to correlate the data with a particular step in the training loop.\\n ```python\\n accelerator.log({\"train_loss\": 1.12, \"valid_loss\": 0.8}, step=1)\\n ```\\n \\n-Once you\\'ve finished training, make sure to run [`~Accelerator.end_training`] so that all the trackers can run their finish functionalities if they have any.\\n+Once you\\'ve finished training, make sure to run [`Accelerator.end_training`] so that all the trackers can run their finish functionalities if they have any.\\n ```python\\n accelerator.end_training()\\n ```\\n@@ -85,8 +85,8 @@ accelerator.end_training()\\n \\n ## Implementing Custom Trackers\\n \\n-To implement a new tracker to be used in `Accelerator`, a new one can be made through implementing the [`~GeneralTracker`] class.\\n-Every tracker must implement three functions:\\n+To implement a new tracker to be used in `Accelerator`, a new one can be made through implementing the [`GeneralTracker`] class.\\n+Every tracker must implement three functions and have three properties:\\n - `__init__`: \\n - Should store a `run_name` and initialize the tracker API of the integrated library. \\n - If a tracker stores their data locally (such as TensorBoard), a `logging_dir` parameter can be added.\\n@@ -95,6 +95,15 @@ Every tracker must implement three functions:\\n - `log`: \\n - Should take in a `values` dictionary and a `step`, and should log them to the run\\n \\n+ - `name` (`str`):\\n+ - A unique string name for the tracker, such as `\"wandb\"` for the wandb tracker. 
\\n+ - This will be used for interacting with this tracker specifically\\n+ - `requires_logging_directory` (`bool`):\\n+ - Whether a `logging_dir` is needed for this particular tracker and if it uses one.\\n+ - `tracker`: \\n+ - This should be implemented as a `@property` function \\n+ - Should return the internal tracking mechanism the library uses, such as the `run` object for `wandb`.\\n+\\n A brief example can be seen below with an integration with Weights and Biases, containing only the relevent information:\\n ```python\\n from accelerate.tracking import GeneralTracker\\n@@ -109,7 +118,11 @@ class MyCustomTracker(GeneralTracker):\\n \\n def __init__(self, run_name: str):\\n self.run_name = run_name\\n- wandb.init(self.run_name)\\n+ run = wandb.init(self.run_name)\\n+\\n+ @property\\n+ def tracker(self):\\n+ return self.run.run\\n \\n def store_init_configuration(self, values: dict):\\n wandb.config(values)\\n@@ -118,7 +131,7 @@ class MyCustomTracker(GeneralTracker):\\n wandb.log(values, step=step)\\n ```\\n \\n-When you are ready to build your `Accelerator` object, pass in an **instance** of your tracker to [`~Accelerator.log_with`] to have it automatically\\n+When you are ready to build your `Accelerator` object, pass in an **instance** of your tracker to [`Accelerator.log_with`] to have it automatically\\n be used with the API:\\n \\n ```python\\n@@ -133,6 +146,30 @@ tracker = MyCustomTracker(\"some_run_name\")\\n accelerator = Accelerator(log_with=[tracker, \"all\"])\\n ```\\n \\n+## Accessing the internal tracker \\n+\\n+If some custom interactions with a tracker might be wanted directly, you can quickly access one using the \\n+[`Accelerator.get_tracker`] method. Just pass in the string corresponding to a tracker\\'s `.name` attribute \\n+and it will return that tracker on the main process.\\n+\\n+This example shows doing so with wandb:\\n+\\n+```python\\n+wandb_tracker = accelerator.get_tracker(\"wandb\")\\n+```\\n+\\n+From there you can interact with `wandb`\\'s `run` object like normal:\\n+\\n+\\n+ Make sure to only interact with trackers on the main process!\\n+\\n+\\n+\\n+```python\\n+if accelerator.is_main_process:\\n+ wandb_run.log_artifact(some_artifact_to_log)\\n+```\\n+\\n ## When a wrapper cannot work\\n \\n If a library has an API that does not follow a strict `.log` with an overall dictionary such as Neptune.AI, logging can be done manually under an `if accelerator.is_main_process` statement:\\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex 093d4c00f..bbd8db435 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -1069,10 +1069,24 @@ def init_trackers(self, project_name: str, config: Optional[dict] = None, init_k\\n for tracker in self.trackers:\\n tracker.store_init_configuration(config)\\n \\n+ @on_main_process\\n+ def get_tracker(self, name: str):\\n+ \"\"\"\\n+ Returns a `tracker` from `self.trackers` based on `name` on the main process only.\\n+\\n+ Args:\\n+ name (`str`):\\n+ The name of a tracker, corresponding to the `.name` property.\\n+ \"\"\"\\n+ for tracker in self.trackers:\\n+ if tracker.name == name:\\n+ return tracker.tracker\\n+ raise ValueError(f\"{name} is not an available tracker stored inside the `Accelerator`.\")\\n+\\n @on_main_process\\n def log(self, values: dict, step: Optional[int] = None, log_kwargs: Optional[dict] = {}):\\n \"\"\"\\n- Logs `values` to all stored trackers in `self.trackers`.\\n+ Logs `values` to all stored trackers in `self.trackers` on the main 
process only.\\n \\n Args:\\n values (`dict`):\\n@@ -1092,7 +1106,7 @@ def log(self, values: dict, step: Optional[int] = None, log_kwargs: Optional[dic\\n @on_main_process\\n def end_training(self):\\n \"\"\"\\n- Runs any special end training behaviors, such as stopping trackers\\n+ Runs any special end training behaviors, such as stopping trackers on the main process only.\\n \"\"\"\\n for tracker in self.trackers:\\n tracker.finish()\\ndiff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\\nindex b89630a4e..673975fc7 100644\\n--- a/src/accelerate/tracking.py\\n+++ b/src/accelerate/tracking.py\\n@@ -103,6 +103,13 @@ def finish(self):\\n \"\"\"\\n pass\\n \\n+ @abstractproperty\\n+ def tracker(self):\\n+ \"\"\"\\n+ Should return internal tracking mechanism used by a tracker class (such as the `run` for wandb)\\n+ \"\"\"\\n+ pass\\n+\\n \\n class TensorBoardTracker(GeneralTracker):\\n \"\"\"\\n@@ -129,6 +136,10 @@ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]]\\n \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\\n )\\n \\n+ @property\\n+ def tracker(self):\\n+ return self.writer\\n+\\n def store_init_configuration(self, values: dict):\\n \"\"\"\\n Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\\n@@ -196,6 +207,10 @@ def __init__(self, run_name: str, **kwargs):\\n \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\\n )\\n \\n+ @property\\n+ def tracker(self):\\n+ return self.run.run\\n+\\n def store_init_configuration(self, values: dict):\\n \"\"\"\\n Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\\n@@ -256,6 +271,10 @@ def __init__(self, run_name: str, **kwargs):\\n \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\\n )\\n \\n+ @property\\n+ def tracker(self):\\n+ return self.writer\\n+\\n def store_init_configuration(self, values: dict):\\n \"\"\"\\n Logs `values` as hyperparameters for the run. 
Should be run at the beginning of your experiment.\\ndiff --git a/tests/test_tracking.py b/tests/test_tracking.py\\nindex c32502796..917ee329d 100644\\n--- a/tests/test_tracking.py\\n+++ b/tests/test_tracking.py\\n@@ -224,6 +224,10 @@ def __init__(self, dir: str):\\n self.writer = csv.DictWriter(self.f, fieldnames=self._col_names)\\n self.writer.writeheader()\\n \\n+ @property\\n+ def tracker(self):\\n+ return self.writer\\n+\\n def store_init_configuration(self, values: dict):\\n logger.info(\"Call init\")\\n self.writer.writerow(values)\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/594'},\n", - " 1014321308: {'diff': 'diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex c2fd566b7..093d4c00f 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -412,7 +412,7 @@ def wrapper(self, *args, **kwargs):\\n \\n def on_local_process(local_process_idx):\\n \"\"\"\\n- Run func on certain local process only\\n+ A decorator that will run the decorated function on a given local process index only.\\n \"\"\"\\n \\n def decorator(func):\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/591'},\n", - " 1014316178: {'diff': 'diff --git a/examples/by_feature/checkpointing.py b/examples/by_feature/checkpointing.py\\nindex 1b462a1f4..bffd843d8 100644\\n--- a/examples/by_feature/checkpointing.py\\n+++ b/examples/by_feature/checkpointing.py\\n@@ -236,23 +236,13 @@ def training_function(config, args):\\n accelerator.save_state(output_dir)\\n \\n model.eval()\\n- samples_seen = 0\\n for step, batch in enumerate(eval_dataloader):\\n # We could avoid this line since we set the accelerator with `device_placement=True` (the default).\\n batch.to(accelerator.device)\\n with torch.no_grad():\\n outputs = model(**batch)\\n predictions = outputs.logits.argmax(dim=-1)\\n- # It is slightly faster to call this once, than multiple times\\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\\n- if accelerator.use_distributed:\\n- if step == len(eval_dataloader) - 1:\\n- # Last batch needs to be truncated on distributed systems as it contains additional samples\\n- predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\\n- references = references[: len(eval_dataloader.dataset) - samples_seen]\\n- else:\\n- # Otherwise we add the number of samples seen\\n- samples_seen += references.shape[0]\\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\\n metric.add_batch(\\n predictions=predictions,\\n references=references,\\ndiff --git a/examples/by_feature/cross_validation.py b/examples/by_feature/cross_validation.py\\nindex 6dd61bbf8..707c93c4c 100644\\n--- a/examples/by_feature/cross_validation.py\\n+++ b/examples/by_feature/cross_validation.py\\n@@ -203,23 +203,13 @@ def training_function(config, args):\\n optimizer.zero_grad()\\n \\n model.eval()\\n- samples_seen = 0\\n for step, batch in enumerate(eval_dataloader):\\n # We could avoid this line since we set the accelerator with `device_placement=True`.\\n batch.to(accelerator.device)\\n with torch.no_grad():\\n outputs = 
model(**batch)\\n predictions = outputs.logits.argmax(dim=-1)\\n- # It is slightly faster to call this once, than multiple times\\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\\n- if accelerator.use_distributed:\\n- if step == len(eval_dataloader) - 1:\\n- # Last batch needs to be truncated on distributed systems as it contains additional samples\\n- predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\\n- references = references[: len(eval_dataloader.dataset) - samples_seen]\\n- else:\\n- # Otherwise we add the number of samples seen\\n- samples_seen += references.shape[0]\\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\\n metric.add_batch(\\n predictions=predictions,\\n references=references,\\ndiff --git a/examples/by_feature/fsdp_with_peak_mem_tracking.py b/examples/by_feature/fsdp_with_peak_mem_tracking.py\\nindex a279e64eb..274ccdfce 100644\\n--- a/examples/by_feature/fsdp_with_peak_mem_tracking.py\\n+++ b/examples/by_feature/fsdp_with_peak_mem_tracking.py\\n@@ -274,23 +274,13 @@ def collate_fn(examples):\\n # context manager to track the peak memory usage during the evaluation\\n with TorchTracemalloc() as tracemalloc:\\n model.eval()\\n- samples_seen = 0\\n for step, batch in enumerate(eval_dataloader):\\n # We could avoid this line since we set the accelerator with `device_placement=True`.\\n batch.to(accelerator.device)\\n with torch.no_grad():\\n outputs = model(**batch)\\n predictions = outputs.logits.argmax(dim=-1)\\n- # It is slightly faster to call this once, than multiple times\\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\\n- if accelerator.use_distributed:\\n- if step == len(eval_dataloader) - 1:\\n- # Last batch needs to be truncated on distributed systems as it contains additional samples\\n- predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\\n- references = references[: len(eval_dataloader.dataset) - samples_seen]\\n- else:\\n- # Otherwise we add the number of samples seen\\n- samples_seen += references.shape[0]\\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\\n metric.add_batch(\\n predictions=predictions,\\n references=references,\\ndiff --git a/examples/by_feature/gradient_accumulation.py b/examples/by_feature/gradient_accumulation.py\\nindex 69856c97b..170a885e3 100644\\n--- a/examples/by_feature/gradient_accumulation.py\\n+++ b/examples/by_feature/gradient_accumulation.py\\n@@ -170,23 +170,13 @@ def training_function(config, args):\\n optimizer.zero_grad()\\n \\n model.eval()\\n- samples_seen = 0\\n for step, batch in enumerate(eval_dataloader):\\n # We could avoid this line since we set the accelerator with `device_placement=True`.\\n batch.to(accelerator.device)\\n with torch.no_grad():\\n outputs = model(**batch)\\n predictions = outputs.logits.argmax(dim=-1)\\n- # It is slightly faster to call this once, than multiple times\\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\\n- if accelerator.use_distributed:\\n- if step == len(eval_dataloader) - 1:\\n- # Last batch needs to be truncated on distributed systems as it contains additional samples\\n- predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\\n- references = references[: len(eval_dataloader.dataset) - samples_seen]\\n- else:\\n- # Otherwise we add the number of samples seen\\n- samples_seen += references.shape[0]\\n+ predictions, references = 
accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\\n metric.add_batch(\\n predictions=predictions,\\n references=references,\\ndiff --git a/examples/by_feature/memory.py b/examples/by_feature/memory.py\\nindex 91f3e41d1..9f0d55c69 100644\\n--- a/examples/by_feature/memory.py\\n+++ b/examples/by_feature/memory.py\\n@@ -182,23 +182,13 @@ def inner_training_loop(batch_size):\\n optimizer.zero_grad()\\n \\n model.eval()\\n- samples_seen = 0\\n for step, batch in enumerate(eval_dataloader):\\n # We could avoid this line since we set the accelerator with `device_placement=True`.\\n batch.to(accelerator.device)\\n with torch.no_grad():\\n outputs = model(**batch)\\n predictions = outputs.logits.argmax(dim=-1)\\n- # It is slightly faster to call this once, than multiple times\\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\\n- if accelerator.use_distributed:\\n- if step == len(eval_dataloader) - 1:\\n- # Last batch needs to be truncated on distributed systems as it contains additional samples\\n- predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\\n- references = references[: len(eval_dataloader.dataset) - samples_seen]\\n- else:\\n- # Otherwise we add the number of samples seen\\n- samples_seen += references.shape[0]\\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\\n metric.add_batch(\\n predictions=predictions,\\n references=references,\\ndiff --git a/examples/by_feature/multi_process_metrics.py b/examples/by_feature/multi_process_metrics.py\\nindex 522cc571b..cb9534c4a 100644\\n--- a/examples/by_feature/multi_process_metrics.py\\n+++ b/examples/by_feature/multi_process_metrics.py\\n@@ -192,6 +192,8 @@ def training_function(config, args):\\n else:\\n # Otherwise we add the number of samples seen\\n samples_seen += references.shape[0]\\n+ # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:\\n+ # accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\\n metric.add_batch(\\n predictions=predictions,\\n references=references,\\ndiff --git a/examples/by_feature/tracking.py b/examples/by_feature/tracking.py\\nindex d7248682d..76ad7a64b 100644\\n--- a/examples/by_feature/tracking.py\\n+++ b/examples/by_feature/tracking.py\\n@@ -194,23 +194,13 @@ def training_function(config, args):\\n optimizer.zero_grad()\\n \\n model.eval()\\n- samples_seen = 0\\n for step, batch in enumerate(eval_dataloader):\\n # We could avoid this line since we set the accelerator with `device_placement=True` (the default).\\n batch.to(accelerator.device)\\n with torch.no_grad():\\n outputs = model(**batch)\\n predictions = outputs.logits.argmax(dim=-1)\\n- # It is slightly faster to call this once, than multiple times\\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\\n- if accelerator.use_distributed:\\n- if step == len(eval_dataloader) - 1:\\n- # Last batch needs to be truncated on distributed systems as it contains additional samples\\n- predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\\n- references = references[: len(eval_dataloader.dataset) - samples_seen]\\n- else:\\n- # Otherwise we add the number of samples seen\\n- samples_seen += references.shape[0]\\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\\n metric.add_batch(\\n predictions=predictions,\\n references=references,\\ndiff --git a/examples/complete_cv_example.py 
b/examples/complete_cv_example.py\\nindex f984406bf..99d109bd1 100644\\n--- a/examples/complete_cv_example.py\\n+++ b/examples/complete_cv_example.py\\n@@ -232,7 +232,6 @@ def training_function(config, args):\\n accelerator.save_state(output_dir)\\n model.eval()\\n accurate = 0\\n- samples_seen = 0\\n for step, batch in enumerate(eval_dataloader):\\n # We could avoid this line since we set the accelerator with `device_placement=True`.\\n batch = {k: v.to(accelerator.device) for k, v in batch.items()}\\n@@ -240,16 +239,7 @@ def training_function(config, args):\\n with torch.no_grad():\\n outputs = model(inputs)\\n predictions = outputs.argmax(dim=-1)\\n- # It is slightly faster to call this once, than multiple times\\n- predictions, references = accelerator.gather((predictions, batch[\"label\"]))\\n- if accelerator.use_distributed:\\n- if step == len(eval_dataloader) - 1:\\n- # Last batch needs to be truncated on distributed systems as it contains additional samples\\n- predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\\n- references = references[: len(eval_dataloader.dataset) - samples_seen]\\n- else:\\n- # Otherwise we add the number of samples seen\\n- samples_seen += references.shape[0]\\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"label\"]))\\n accurate_preds = predictions == references\\n accurate += accurate_preds.long().sum()\\n \\ndiff --git a/examples/complete_nlp_example.py b/examples/complete_nlp_example.py\\nindex 000da6038..094f6cfb5 100644\\n--- a/examples/complete_nlp_example.py\\n+++ b/examples/complete_nlp_example.py\\n@@ -212,23 +212,13 @@ def collate_fn(examples):\\n accelerator.save_state(output_dir)\\n \\n model.eval()\\n- samples_seen = 0\\n for step, batch in enumerate(eval_dataloader):\\n # We could avoid this line since we set the accelerator with `device_placement=True`.\\n batch.to(accelerator.device)\\n with torch.no_grad():\\n outputs = model(**batch)\\n predictions = outputs.logits.argmax(dim=-1)\\n- # It is slightly faster to call this once, than multiple times\\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\\n- if accelerator.use_distributed:\\n- if step == len(eval_dataloader) - 1:\\n- # Last batch needs to be truncated on distributed systems as it contains additional samples\\n- predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\\n- references = references[: len(eval_dataloader.dataset) - samples_seen]\\n- else:\\n- # Otherwise we add the number of samples seen\\n- samples_seen += references.shape[0]\\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\\n metric.add_batch(\\n predictions=predictions,\\n references=references,\\ndiff --git a/examples/cv_example.py b/examples/cv_example.py\\nindex 875b68398..1118a2f0e 100644\\n--- a/examples/cv_example.py\\n+++ b/examples/cv_example.py\\n@@ -166,7 +166,6 @@ def training_function(config, args):\\n model.eval()\\n accurate = 0\\n num_elems = 0\\n- samples_seen = 0\\n for _, batch in enumerate(eval_dataloader):\\n # We could avoid this line since we set the accelerator with `device_placement=True`.\\n batch = {k: v.to(accelerator.device) for k, v in batch.items()}\\n@@ -174,16 +173,7 @@ def training_function(config, args):\\n with torch.no_grad():\\n outputs = model(inputs)\\n predictions = outputs.argmax(dim=-1)\\n- # It is slightly faster to call this once, than multiple times\\n- predictions, references = accelerator.gather((predictions, 
batch[\"label\"]))\\n- if accelerator.use_distributed:\\n- if step == len(eval_dataloader) - 1:\\n- # Last batch needs to be truncated on distributed systems as it contains additional samples\\n- predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\\n- references = references[: len(eval_dataloader.dataset) - samples_seen]\\n- else:\\n- # Otherwise we add the number of samples seen\\n- samples_seen += references.shape[0]\\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"label\"]))\\n accurate_preds = predictions == references\\n num_elems += accurate_preds.shape[0]\\n accurate += accurate_preds.long().sum()\\ndiff --git a/examples/nlp_example.py b/examples/nlp_example.py\\nindex b1e7cba27..a126b5dd5 100644\\n--- a/examples/nlp_example.py\\n+++ b/examples/nlp_example.py\\n@@ -154,23 +154,13 @@ def training_function(config, args):\\n optimizer.zero_grad()\\n \\n model.eval()\\n- samples_seen = 0\\n for step, batch in enumerate(eval_dataloader):\\n # We could avoid this line since we set the accelerator with `device_placement=True`.\\n batch.to(accelerator.device)\\n with torch.no_grad():\\n outputs = model(**batch)\\n predictions = outputs.logits.argmax(dim=-1)\\n- # It is slightly faster to call this once, than multiple times\\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\\n- if accelerator.use_distributed:\\n- if step == len(eval_dataloader) - 1:\\n- # Last batch needs to be truncated on distributed systems as it contains additional samples\\n- predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\\n- references = references[: len(eval_dataloader.dataset) - samples_seen]\\n- else:\\n- # Otherwise we add the number of samples seen\\n- samples_seen += references.shape[0]\\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\\n metric.add_batch(\\n predictions=predictions,\\n references=references,\\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\\nindex bbd8db435..ef421a22b 100644\\n--- a/src/accelerate/accelerator.py\\n+++ b/src/accelerate/accelerator.py\\n@@ -956,9 +956,6 @@ def gather_for_metrics(self, tensor):\\n tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`):\\n The tensors for calculating metrics across all processes.\\n \"\"\"\\n- raise NotImplementedError(\\n- \"Currently there are a number of bugs with this method. 
You should use `Accelerator.gather()` and drop the samples yourself for the time being.\"\\n- )\\n tensor = self.gather(tensor)\\n if self.use_distributed:\\n if self.gradient_state.remainder == -1:\\ndiff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\\nindex 4cedd8f1a..b8152b302 100644\\n--- a/src/accelerate/data_loader.py\\n+++ b/src/accelerate/data_loader.py\\n@@ -470,11 +470,6 @@ def _fetch_batches(self, iterator):\\n \\n def __iter__(self):\\n self.gradient_state._set_end_of_dataloader(False)\\n- try:\\n- length = getattr(self.dataset, \"total_dataset_length\", len(self.dataset))\\n- self.gradient_state._set_remainder(length % self.batch_size)\\n- except:\\n- self.gradient_state._set_remainder(-1)\\n main_iterator = None\\n if self.state.process_index == 0:\\n # We only iterate through the DataLoader on process 0.\\n@@ -501,7 +496,16 @@ def __iter__(self):\\n observed_batch_size = find_batch_size(batch)\\n batch_size = observed_batch_size // self.state.num_processes\\n \\n- if not self._drop_last and self._stop_iteration and observed_batch_size % self.state.num_processes != 0:\\n+ stop_iteration = self._stop_iteration\\n+ if not stop_iteration:\\n+ # We may still be at the end of the dataloader without knowing it yet: if there is nothing left in\\n+ # the dataloader since the number of batches is a round multiple of the number of processes.\\n+ next_batch, next_batch_info, next_skip = self._fetch_batches(main_iterator)\\n+ # next_batch_info[0] is None when there are no more batches, otherwise we still need to process them.\\n+ if self._stop_iteration and next_batch_info[0] is None:\\n+ stop_iteration = True\\n+\\n+ if not self._drop_last and stop_iteration and observed_batch_size % self.state.num_processes != 0:\\n # If the last batch is not complete, let\\'s add the first batch to it.\\n batch = concatenate([batch, first_batch], dim=0)\\n # Batch size computation above is wrong, it\\'s off by 1 so we fix it.\\n@@ -510,16 +514,8 @@ def __iter__(self):\\n data_slice = slice(self.state.process_index * batch_size, (self.state.process_index + 1) * batch_size)\\n batch = slice_tensors(batch, data_slice)\\n \\n- stop_iteration = self._stop_iteration\\n- if not stop_iteration:\\n- # We may still be at the end of the dataloader without knowing it yet: if there is nothing left\\n- # because by change the dataset had a round multiple of samples.\\n- next_batch, next_batch_info, next_skip = self._fetch_batches(main_iterator)\\n- # next_batch_info[0] is None when there are no more batches, otherwise we still need to process them.\\n- if self._stop_iteration and next_batch_info[0] is None:\\n- stop_iteration = True\\n-\\n if stop_iteration:\\n+ self.gradient_state._set_remainder(observed_batch_size)\\n self.gradient_state._set_end_of_dataloader(True)\\n yield batch\\n \\ndiff --git a/src/accelerate/test_utils/__init__.py b/src/accelerate/test_utils/__init__.py\\nindex 382bf81bf..43d5ed7ef 100644\\n--- a/src/accelerate/test_utils/__init__.py\\n+++ b/src/accelerate/test_utils/__init__.py\\n@@ -7,6 +7,7 @@\\n execute_subprocess_async,\\n require_cpu,\\n require_cuda,\\n+ require_huggingface_suite,\\n require_multi_gpu,\\n require_single_gpu,\\n require_tpu,\\n@@ -16,4 +17,4 @@\\n from .training import RegressionDataset, RegressionModel\\n \\n \\n-from .scripts import test_metrics, test_script, test_sync # isort:skip\\n+from .scripts import test_script, test_sync # isort:skip\\ndiff --git a/src/accelerate/test_utils/scripts/external_deps/__init__.py 
b/src/accelerate/test_utils/scripts/external_deps/__init__.py\\nnew file mode 100644\\nindex 000000000..e69de29bb\\ndiff --git a/src/accelerate/test_utils/scripts/external_deps/test_metrics.py b/src/accelerate/test_utils/scripts/external_deps/test_metrics.py\\nnew file mode 100755\\nindex 000000000..d234ee14a\\n--- /dev/null\\n+++ b/src/accelerate/test_utils/scripts/external_deps/test_metrics.py\\n@@ -0,0 +1,170 @@\\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\\n+#\\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\\n+# you may not use this file except in compliance with the License.\\n+# You may obtain a copy of the License at\\n+#\\n+# http://www.apache.org/licenses/LICENSE-2.0\\n+#\\n+# Unless required by applicable law or agreed to in writing, software\\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n+# See the License for the specific language governing permissions and\\n+# limitations under the License.\\n+\\n+import math\\n+from copy import deepcopy\\n+\\n+import torch\\n+from torch.utils.data import DataLoader\\n+\\n+import datasets\\n+import evaluate\\n+import transformers\\n+from accelerate import Accelerator\\n+from accelerate.test_utils import RegressionDataset, RegressionModel\\n+from accelerate.utils import is_tpu_available, set_seed\\n+from datasets import load_dataset\\n+from transformers import AutoModelForSequenceClassification, AutoTokenizer\\n+\\n+\\n+def get_basic_setup(accelerator, num_samples=82):\\n+ \"Returns everything needed to perform basic training\"\\n+ set_seed(42)\\n+ model = RegressionModel()\\n+ ddp_model = deepcopy(model)\\n+ dset = RegressionDataset(length=num_samples)\\n+ dataloader = DataLoader(dset, batch_size=16)\\n+ model.to(accelerator.device)\\n+ ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)\\n+ return model, ddp_model, dataloader\\n+\\n+\\n+def get_dataloader(accelerator: Accelerator, use_longest=False):\\n+ tokenizer = AutoTokenizer.from_pretrained(\"hf-internal-testing/mrpc-bert-base-cased\")\\n+ dataset = load_dataset(\"glue\", \"mrpc\", split=\"validation\")\\n+\\n+ def tokenize_function(examples):\\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\\n+ return outputs\\n+\\n+ with accelerator.main_process_first():\\n+ tokenized_datasets = dataset.map(\\n+ tokenize_function,\\n+ batched=True,\\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\\n+ )\\n+\\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\\n+\\n+ def collate_fn(examples):\\n+ if use_longest:\\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\\n+\\n+ return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)\\n+\\n+\\n+def get_mrpc_setup(dispatch_batches, split_batches):\\n+ accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)\\n+ dataloader = get_dataloader(accelerator, not dispatch_batches)\\n+ model = AutoModelForSequenceClassification.from_pretrained(\\n+ \"hf-internal-testing/mrpc-bert-base-cased\", return_dict=True\\n+ )\\n+ ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)\\n+ return {\"ddp\": [ddp_model, ddp_dataloader, \"cuda:0\"], \"no\": [model, dataloader, accelerator.device]}, accelerator\\n+\\n+\\n+def 
generate_predictions(model, dataloader, accelerator):\\n+ logits_and_targets = []\\n+ for batch in dataloader:\\n+ input, target = batch.values()\\n+ with torch.no_grad():\\n+ logit = model(input)\\n+ logit, target = accelerator.gather_for_metrics((logit, target))\\n+ logits_and_targets.append((logit, target))\\n+ logits, targs = [], []\\n+ for (logit, targ) in logits_and_targets:\\n+ logits.append(logit)\\n+ targs.append(targ)\\n+ logits, targs = torch.cat(logits), torch.cat(targs)\\n+ return logits, targs\\n+\\n+\\n+def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False):\\n+ model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples)\\n+ logits, targs = generate_predictions(ddp_model, dataloader, accelerator)\\n+ assert (\\n+ len(logits) == num_samples\\n+ ), f\"Unexpected number of inputs:\\\\n Expected: {num_samples}\\\\n Actual: {len(logits)}\"\\n+\\n+\\n+def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):\\n+ metric = evaluate.load(\"glue\", \"mrpc\")\\n+ setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)\\n+ # First do baseline\\n+ model, dataloader, device = setup[\"no\"]\\n+ model.to(device)\\n+ model.eval()\\n+ for batch in dataloader:\\n+ batch.to(device)\\n+ with torch.inference_mode():\\n+ outputs = model(**batch)\\n+ preds = outputs.logits.argmax(dim=-1)\\n+ metric.add_batch(predictions=preds, references=batch[\"labels\"])\\n+ baseline = metric.compute()\\n+\\n+ # Then do distributed\\n+ model, dataloader, device = setup[\"ddp\"]\\n+ model.eval()\\n+ for batch in dataloader:\\n+ with torch.inference_mode():\\n+ outputs = model(**batch)\\n+ preds = outputs.logits.argmax(dim=-1)\\n+ references = batch[\"labels\"]\\n+ preds, references = accelerator.gather_for_metrics((preds, references))\\n+ metric.add_batch(predictions=preds, references=references)\\n+ distributed = metric.compute()\\n+\\n+ for key in \"accuracy f1\".split():\\n+ assert math.isclose(\\n+ baseline[key], distributed[key]\\n+ ), f\"Baseline and Distributed are not the same for key {key}:\\\\n\\\\tBaseline: {baseline[key]}\\\\n\\\\tDistributed: {distributed[key]}\\\\n\"\\n+\\n+\\n+def main():\\n+ accelerator = Accelerator(split_batches=False, dispatch_batches=False)\\n+ if accelerator.is_local_main_process:\\n+ datasets.utils.logging.set_verbosity_warning()\\n+ transformers.utils.logging.set_verbosity_warning()\\n+ else:\\n+ datasets.utils.logging.set_verbosity_error()\\n+ transformers.utils.logging.set_verbosity_error()\\n+ # These are a bit slower so they should only be ran on the GPU or TPU\\n+ if torch.cuda.is_available() or is_tpu_available():\\n+ if accelerator.is_local_main_process:\\n+ print(\"**Testing gather_for_metrics**\")\\n+ for split_batches in [True, False]:\\n+ for dispatch_batches in [True, False]:\\n+ if accelerator.is_local_main_process:\\n+ print(f\"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`\")\\n+ test_mrpc(dispatch_batches, split_batches)\\n+ accelerator.state._reset_state()\\n+ if accelerator.is_local_main_process:\\n+ print(\"**Test torch metrics**\")\\n+ for split_batches in [True, False]:\\n+ for dispatch_batches in [True, False]:\\n+ accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)\\n+ if accelerator.is_local_main_process:\\n+ print(f\"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99\")\\n+ test_torch_metrics(accelerator, 99)\\n+ 
accelerator.state._reset_state()\\n+\\n+\\n+def _mp_fn(index):\\n+ # For xla_spawn (TPUs)\\n+ main()\\n+\\n+\\n+if __name__ == \"__main__\":\\n+ main()\\ndiff --git a/src/accelerate/test_utils/scripts/test_metrics.py b/src/accelerate/test_utils/scripts/test_metrics.py\\ndeleted file mode 100644\\nindex 8f057ccc5..000000000\\n--- a/src/accelerate/test_utils/scripts/test_metrics.py\\n+++ /dev/null\\n@@ -1,90 +0,0 @@\\n-# Copyright 2022 The HuggingFace Team. All rights reserved.\\n-#\\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\\n-# you may not use this file except in compliance with the License.\\n-# You may obtain a copy of the License at\\n-#\\n-# http://www.apache.org/licenses/LICENSE-2.0\\n-#\\n-# Unless required by applicable law or agreed to in writing, software\\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n-# See the License for the specific language governing permissions and\\n-# limitations under the License.\\n-\\n-from copy import deepcopy\\n-\\n-import torch\\n-from torch.utils.data import DataLoader\\n-\\n-from accelerate import Accelerator\\n-from accelerate.test_utils import RegressionDataset, RegressionModel\\n-from accelerate.utils import set_seed\\n-\\n-\\n-def get_setup(accelerator, num_samples=82):\\n- \"Returns everything needed to perform basic training\"\\n- set_seed(42)\\n- model = RegressionModel()\\n- ddp_model = deepcopy(model)\\n- dset = RegressionDataset(length=num_samples)\\n- dataloader = DataLoader(dset, batch_size=16)\\n- model.to(accelerator.device)\\n- ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)\\n- return model, ddp_model, dataloader\\n-\\n-\\n-def generate_predictions(model, dataloader, accelerator):\\n- logits_and_targets = []\\n- for batch in dataloader:\\n- input, target = batch.values()\\n- with torch.no_grad():\\n- logits = model(input)\\n- logits, target = accelerator.gather_for_metrics((logits, target))\\n- logits_and_targets.append((logits, target))\\n- inps, targs = [], []\\n- for (inp, targ) in logits_and_targets:\\n- inps.append(inp)\\n- targs.append(targ)\\n- inps, targs = torch.cat(inps), torch.cat(targs)\\n- return inps, targs\\n-\\n-\\n-def test_torch_metrics(accelerator: Accelerator, num_samples=82):\\n- model, ddp_model, dataloader = get_setup(accelerator, num_samples)\\n- inps, targs = generate_predictions(ddp_model, dataloader, accelerator)\\n- assert (\\n- len(inps) == num_samples\\n- ), f\"Unexpected number of inputs:\\\\n Expected: {num_samples}\\\\n Actual: {len(inps)}\"\\n-\\n-\\n-def main():\\n- accelerator = Accelerator(split_batches=False, dispatch_batches=False)\\n- if accelerator.is_local_main_process:\\n- print(\"**Test torch metrics**\")\\n- print(\"With: `split_batches=False`, `dispatch_batches=False`\")\\n- test_torch_metrics(accelerator)\\n- accelerator.state._reset_state()\\n- accelerator = Accelerator(split_batches=True, dispatch_batches=False)\\n- if accelerator.is_local_main_process:\\n- print(\"With: `split_batches=True`, `dispatch_batches=False`\")\\n- test_torch_metrics(accelerator)\\n- accelerator.state._reset_state()\\n- accelerator = Accelerator(split_batches=False, dispatch_batches=True)\\n- if accelerator.is_local_main_process:\\n- print(\"With: `split_batches=False`, `dispatch_batches=True`\")\\n- test_torch_metrics(accelerator)\\n- accelerator.state._reset_state()\\n- accelerator = Accelerator(split_batches=True, dispatch_batches=True)\\n- if 
accelerator.is_local_main_process:\\n- print(\"With: `split_batches=True`, `dispatch_batches=True`\")\\n- test_torch_metrics(accelerator)\\n-\\n-\\n-def _mp_fn(index):\\n- # For xla_spawn (TPUs)\\n- main()\\n-\\n-\\n-if __name__ == \"__main__\":\\n- main()\\ndiff --git a/src/accelerate/test_utils/testing.py b/src/accelerate/test_utils/testing.py\\nindex 145cc80b2..381f92c58 100644\\n--- a/src/accelerate/test_utils/testing.py\\n+++ b/src/accelerate/test_utils/testing.py\\n@@ -30,10 +30,12 @@\\n from ..utils import (\\n gather,\\n is_comet_ml_available,\\n+ is_datasets_available,\\n is_deepspeed_available,\\n is_tensorboard_available,\\n is_torch_version,\\n is_tpu_available,\\n+ is_transformers_available,\\n is_wandb_available,\\n )\\n \\n@@ -84,6 +86,15 @@ def require_cuda(test_case):\\n return unittest.skipUnless(torch.cuda.is_available(), \"test requires a GPU\")(test_case)\\n \\n \\n+def require_huggingface_suite(test_case):\\n+ \"\"\"\\n+ Decorator marking a test that requires transformers and datasets. These tests are skipped when they are not.\\n+ \"\"\"\\n+ return unittest.skipUnless(\\n+ is_transformers_available() and is_datasets_available(), \"test requires the Hugging Face suite\"\\n+ )(test_case)\\n+\\n+\\n def require_tpu(test_case):\\n \"\"\"\\n Decorator marking a test that requires TPUs. These tests are skipped when there are no TPUs available.\\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\\nindex d1a015b93..db21afc3a 100644\\n--- a/src/accelerate/utils/__init__.py\\n+++ b/src/accelerate/utils/__init__.py\\n@@ -25,6 +25,7 @@\\n is_boto3_available,\\n is_ccl_available,\\n is_comet_ml_available,\\n+ is_datasets_available,\\n is_deepspeed_available,\\n is_sagemaker_available,\\n is_tensorboard_available,\\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\\nindex bc4e3b4cd..074d02e4a 100644\\n--- a/src/accelerate/utils/imports.py\\n+++ b/src/accelerate/utils/imports.py\\n@@ -89,6 +89,10 @@ def is_transformers_available():\\n return importlib.util.find_spec(\"transformers\") is not None\\n \\n \\n+def is_datasets_available():\\n+ return importlib.util.find_spec(\"datasets\") is not None\\n+\\n+\\n def is_tensorboard_available():\\n return importlib.util.find_spec(\"tensorboard\") is not None or importlib.util.find_spec(\"tensorboardX\") is not None\\n \\ndiff --git a/tests/test_metrics.py b/tests/test_metrics.py\\nindex 7e42c793e..b26ef00b2 100644\\n--- a/tests/test_metrics.py\\n+++ b/tests/test_metrics.py\\n@@ -23,31 +23,36 @@\\n from accelerate.test_utils import (\\n execute_subprocess_async,\\n require_cpu,\\n+ require_huggingface_suite,\\n require_multi_gpu,\\n require_single_gpu,\\n- skip,\\n- test_metrics,\\n )\\n from accelerate.utils import get_launch_prefix, patch_environment\\n \\n \\n-@skip\\n+@require_huggingface_suite\\n class MetricTester(unittest.TestCase):\\n def setUp(self):\\n mod_file = inspect.getfile(accelerate.test_utils)\\n- self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"scripts\", \"test_metrics.py\"])\\n+ self.test_file_path = os.path.sep.join(\\n+ mod_file.split(os.path.sep)[:-1] + [\"scripts\", \"external_deps\", \"test_metrics.py\"]\\n+ )\\n+\\n+ from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401\\n+\\n+ self.test_metrics = test_metrics\\n \\n @require_cpu\\n def test_metric_cpu_noop(self):\\n- debug_launcher(test_metrics.main, num_processes=1)\\n+ debug_launcher(self.test_metrics.main, num_processes=1)\\n \\n @require_cpu\\n 
def test_metric_cpu_multi(self):\\n- debug_launcher(test_metrics.main)\\n+ debug_launcher(self.test_metrics.main)\\n \\n @require_single_gpu\\n def test_metric_gpu(self):\\n- test_metrics.main()\\n+ self.test_metrics.main()\\n \\n @require_multi_gpu\\n def test_metric_gpu_multi(self):\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}],\n", - " 'url': 'https://api.github.com/repos/huggingface/accelerate/pulls/590'}}" - ] - }, - "execution_count": 150, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "with open(\"data.json\") as f:\n", - " import json\n", - " json.dump()" - ] - }, - { - "cell_type": "code", - "execution_count": 143, - "id": "111fbcb2-a1f2-4d26-8d82-742f65545635", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'diff': 'diff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\\nindex c58cfeb50..679e57ff2 100644\\n--- a/src/accelerate/utils/modeling.py\\n+++ b/src/accelerate/utils/modeling.py\\n@@ -666,7 +666,7 @@ def load_checkpoint_in_model(\\n elif len(potential_index) == 1:\\n index_filename = os.path.join(checkpoint, potential_index[0])\\n else:\\n- raise ValueError(f\"{checkpoint} containing mote than one `.index.json` file, delete the irrelevant ones.\")\\n+ raise ValueError(f\"{checkpoint} containing more than one `.index.json` file, delete the irrelevant ones.\")\\n else:\\n raise ValueError(\\n \"`checkpoint` should be the path to a file containing a whole state dict, or the index of a sharded \"\\n',\n", - " 'code_comments': [],\n", - " 'context': [{'id': 1374301240,\n", - " 'created_at': datetime.datetime(2023, 1, 7, 0, 22, 51),\n", - " 'body': '_The documentation is not available anymore as the PR was closed or merged._',\n", - " 'from_author': False}]}" - ] - }, - "execution_count": 143, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "data[1188526483]" - ] - }, - { - "cell_type": "code", - "execution_count": 76, - "id": "f5d6bd99-dc80-4821-89fb-031127771a49", - "metadata": {}, - "outputs": [], - "source": [ - "pull = repo.get_pull(937)" - ] - }, - { - "cell_type": "code", - "execution_count": 65, - "id": "25b51688-09f9-4ce6-a8e1-1eba42b73de2", - "metadata": {}, "outputs": [], "source": [ - "comments = pull.get_comments() # These are change comments" + "new_code = []\n", + "for item in list(data.keys()):\n", + " d = data[item]\n", + " d[\"pr_id\"] = item\n", + " if len(d[\"code_comments\"]) > 1:\n", + " new_code.append(d)" ] }, { "cell_type": "code", - "execution_count": 74, - "id": "975e7333-a354-4002-9951-85d7f5f7c2c2", + "execution_count": 174, + "id": "1149e33e-75bf-4b0b-a94c-fe5ee2a291bc", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "\"@@ -34,7 +34,98 @@\\n
Run your *raw* PyTorch training script on any kind of device\\n \\n \\n-🤗 Accelerate provides an easy API to make your scripts run with mixed precision and on any kind of distributed setting (multi-GPUs, TPUs etc.) while still letting you write your own training loop. The same code can then run on your local machine for debugging or your training environment.\\n+🤗 Accelerate was created for PyTorch users who like to write the training loop of PyTorch models but are reluctant to write and maintain the boiler code needed to use multi-GPUs/TPU/fp16.\\n+\\n+🤗 Accelerate abstracts exactly and only the boiler code related to multi-GPUs/TPU/fp16 and let the rest of your code unchanged.\\n+\\n+Here is an example:\\n+\\n+\\n+\\n+\\n+\\n+\\n+\\n+\\n+
Original training code (CPU or mono-GPU only) | With Accelerate for CPU/GPU/multi-GPUs/TPUs/fp16
\\n+\\n+```python\\n+import torch\\n+import torch.nn.functional as F\\n+from datasets import load_dataset\\n+\\n+\\n+\\n+device = 'cpu'\\n+\\n+model = torch.nn.Transformer().to(device)\\n+optim = torch.optim.Adam(model.parameters())\\n+\\n+dataset = load_dataset('my_dataset')\\n+data = torch.utils.data.Dataloader(dataset)\\n+\\n+\\n+\\n+\\n+model.train()\\n+for epoch in range(10):\\n+ for source, targets in data:\\n+ source = source.to(device)\\n+ targets = targets.to(device)\\n+\\n+ optimizer.zero_grad()\\n+\\n+ output = model(source, targets)\\n+ loss = F.cross_entropy(output, targets)\\n+\\n+ loss.backward()\\n+\\n+ optimizer.step()\\n+```\\n+\\n+\\n+\\n+```python\\n+ import torch\\n+ import torch.nn.functional as F\\n+ from datasets import load_dataset\\n+\\n++ from accelerate import Accelerator\\n++ accelerator = Accelerator()\"" + "134" ] }, - "execution_count": 74, + "execution_count": 174, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "comments[0].diff_hunk" + "len(new_code)" ] }, { "cell_type": "code", - "execution_count": 78, - "id": "1a031325-53bc-40db-936f-876a92283773", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'_The documentation is not available anymore as the PR was closed or merged._'" - ] - }, - "execution_count": 78, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "pull.get_issue_comments()[0].body # These are raw comments" - ] - }, - { - "cell_type": "code", - "execution_count": 26, - "id": "8e748bdc-5a29-4c4e-89e6-601a12725629", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "\"@@ -34,7 +34,98 @@\\n
Run your *raw* PyTorch training script on any kind of device\\n \\n \\n-🤗 Accelerate provides an easy API to make your scripts run with mixed precision and on any kind of distributed setting (multi-GPUs, TPUs etc.) while still letting you write your own training loop. The same code can then run on your local machine for debugging or your training environment.\\n+🤗 Accelerate was created for PyTorch users who like to write the training loop of PyTorch models but are reluctant to write and maintain the boiler code needed to use multi-GPUs/TPU/fp16.\\n+\\n+🤗 Accelerate abstracts exactly and only the boiler code related to multi-GPUs/TPU/fp16 and let the rest of your code unchanged.\\n+\\n+Here is an example:\\n+\\n+\\n+\\n+\\n+\\n+\\n+\\n+\\n+
Original training code (CPU or mono-GPU only) | With Accelerate for CPU/GPU/multi-GPUs/TPUs/fp16
\\n+\\n+```python\\n+import torch\\n+import torch.nn.functional as F\\n+from datasets import load_dataset\\n+\\n+\\n+\\n+device = 'cpu'\\n+\\n+model = torch.nn.Transformer().to(device)\\n+optim = torch.optim.Adam(model.parameters())\\n+\\n+dataset = load_dataset('my_dataset')\\n+data = torch.utils.data.Dataloader(dataset)\\n+\\n+\\n+\\n+\\n+model.train()\\n+for epoch in range(10):\\n+ for source, targets in data:\\n+ source = source.to(device)\\n+ targets = targets.to(device)\\n+\\n+ optimizer.zero_grad()\\n+\\n+ output = model(source, targets)\\n+ loss = F.cross_entropy(output, targets)\\n+\\n+ loss.backward()\\n+\\n+ optimizer.step()\\n+```\\n+\\n+\\n+\\n+```python\\n+ import torch\\n+ import torch.nn.functional as F\\n+ from datasets import load_dataset\\n+\\n++ from accelerate import Accelerator\\n++ accelerator = Accelerator()\"" - ] - }, - "execution_count": 26, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "comment.diff_hunk" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "id": "fd61cc78-19a9-44d2-9720-a521e7238657", + "execution_count": 175, + "id": "7763a1aa-727c-41c9-b348-a4adaaca8445", "metadata": {}, "outputs": [], "source": [ - "comment.raw_data;\n", - "# diff hunk - both comment + full pr" + "import json\n", + "with open(\"data.json\", \"w+\") as f:\n", + " json.dump(new_code, f)" ] }, {