My index.js file:
const Discord = require('discord.js');
const levels = require('discord-xp');
const client = new Discord.Client();
const mongoose = require('./database/mongoose');
const fs = require(`fs`);
require('dotenv').config();
const port = process.env.PORT || 5000;
const host = '0.0.0.0';
const chalk = require('chalk');
const { Player } = require('discord-player');
const Gamedig = require('gamedig');
client.prefix = (`${process.env.PREFIX}`);
client.commands = new Discord.Collection();
client.player = new Player(client);
client.config = require('./config/bot');
client.emotes = client.config.emojis;
client.filters = client.config.filters;
// Load every command file from ./commands/<category>/<name>.js
fs.readdirSync('./commands').forEach(dirs => {
    const commands = fs.readdirSync(`./commands/${dirs}`).filter(files => files.endsWith('.js'));
    for (const file of commands) {
        const command = require(`./commands/${dirs}/${file}`);
        console.log(`Loading command ${file}`);
        client.commands.set(command.name.toLowerCase(), command);
    }
});
const player = fs.readdirSync('./player').filter(file => file.endsWith('.js'));
const events = fs.readdirSync('./events').filter(file => file.endsWith('.js'));
// Register discord-player events (the file name, minus .js, is the event name)
for (const file of player) {
    console.log(`Loading discord-player event ${file}`);
    const event = require(`./player/${file}`);
    client.player.on(file.split('.')[0], event.bind(null, client));
}
// Register discord.js client events (the file name, minus .js, is the event name)
for (const file of events) {
    console.log(`Loading discord.js event ${file}`);
    const event = require(`./events/${file}`);
    client.on(file.split('.')[0], event.bind(null, client));
}
mongoose.init();
client.login(process.env.TOKEN)
My Procfile:
Worker: node index.js
My package.json:
{
  "name": "icrp-bot",
  "version": "1.0.0",
  "description": "Made For ICRP",
  "main": "index.js",
  "scripts": {
    "test": ".test",
    "start": "node index.js"
  },
  "author": "Bombo43453#1901",
  "license": "ISC",
  "dependencies": {
    "axios": "^0.21.1",
    "baseplayer": "^0.2.9",
    "chalk": "^4.1.1",
    "discord-fivem-api": "^1.0.4",
    "discord-player": "^3.4.0",
    "discord-xp": "^1.1.14",
    "discord.js": "^12.5.3",
    "dotenv": "^8.2.0",
    "ffmpeg-static": "^4.3.0",
    "gamedig": "^3.0.1",
    "log-timestamp": "^0.3.0",
    "moment": "^2.29.1",
    "moment-timezone": "^0.5.33",
    "mongoose": "^5.11.14",
    "node-gyp": "^8.0.0",
    "opus": "0.0.0",
    "opusscript": "0.0.8",
    "pm2": "^4.5.6",
    "python": "0.0.4",
    "rebuild": "^0.1.2"
  }
}
Full Error:
2021-04-23T17:18:33.808571+00:00 heroku[Worker.1]: State changed from down to starting
2021-04-23T17:18:43.196477+00:00 heroku[Worker.1]: Starting process with command `node index.js`
2021-04-23T17:18:43.846098+00:00 heroku[Worker.1]: State changed from starting to up
2021-04-23T17:18:43.854420+00:00 heroku[Worker.1]: Idling
2021-04-23T17:18:43.856605+00:00 heroku[Worker.1]: State changed from up to down
2021-04-23T17:18:43.870047+00:00 heroku[Worker.1]: Idling because quota is exhausted
2021-04-23T17:18:50.422071+00:00 app[Worker.1]: Error waiting for network: Resource temporarily unavailable
Info: this is a simple bot I made for a FiveM server. I tried to host it on Heroku and to fix the error myself. I tried adding a port const and a host const, but that did not fix anything. I also think my issue is something small, and I have 200 hours left on Heroku, so hours should not be the issue. I tried changing the port to 8080 too, and that did not help either. If you have an idea, please tell me.
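For what it's worth, the "Idling because quota is exhausted" line in the log refers to the account's free dyno quota, so it may be worth cross-checking it from the Heroku CLI. A minimal sketch, assuming the CLI is installed and logged in; icrp-bot is a placeholder for the real app name:
heroku ps -a icrp-bot                  # lists dynos and, on free apps, the remaining free dyno hours
heroku ps:scale Worker=1 -a icrp-bot   # (re)scale the worker process type declared in the Procfile
heroku logs --tail -a icrp-bot         # watch the worker start up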
CCuskley opened this issue May 29, 2018 · 17 comments
I’m running a simple Flask server on Heroku:
web: gunicorn --worker-class eventlet -w 1 app:app --log-file=-
It’s using Python 2.7.15 for compatibility with various other packages.
I seem to have run into a duplicate of this problem from ages ago since Heroku has moved to v. 19.8.1. Some images (anywhere from a few kB to a few MB in size) won't load; I have a site with lots of images (mostly sprite sheets for animation) and a seemingly random selection of them won't load each time, each throwing the following error (if an image is cached from an earlier version, it loads without issue):
2018-05-29T09:24:36.216949+00:00 app[web.1]: [2018-05-29 09:24:36 +0000] [10] [ERROR] Socket error processing request.
2018-05-29T09:24:36.216969+00:00 app[web.1]: Traceback (most recent call last):
2018-05-29T09:24:36.216971+00:00 app[web.1]: File "/app/.heroku/python/lib/python2.7/site-packages/gunicorn/workers/async.py", line 66, in handle
2018-05-29T09:24:36.216972+00:00 app[web.1]: six.reraise(*sys.exc_info())
2018-05-29T09:24:36.216974+00:00 app[web.1]: File "/app/.heroku/python/lib/python2.7/site-packages/gunicorn/workers/async.py", line 56, in handle
2018-05-29T09:24:36.216976+00:00 app[web.1]: self.handle_request(listener_name, req, client, addr)
2018-05-29T09:24:36.216978+00:00 app[web.1]: File "/app/.heroku/python/lib/python2.7/site-packages/gunicorn/workers/async.py", line 129, in handle_request
2018-05-29T09:24:36.216980+00:00 app[web.1]: six.reraise(*sys.exc_info())
2018-05-29T09:24:36.216981+00:00 app[web.1]: File "/app/.heroku/python/lib/python2.7/site-packages/gunicorn/workers/async.py", line 112, in handle_request
2018-05-29T09:24:36.216983+00:00 app[web.1]: resp.write_file(respiter)
2018-05-29T09:24:36.216985+00:00 app[web.1]: File "/app/.heroku/python/lib/python2.7/site-packages/gunicorn/http/wsgi.py", line 403, in write_file
2018-05-29T09:24:36.216987+00:00 app[web.1]: if not self.sendfile(respiter):
2018-05-29T09:24:36.216989+00:00 app[web.1]: File "/app/.heroku/python/lib/python2.7/site-packages/gunicorn/http/wsgi.py", line 393, in sendfile
2018-05-29T09:24:36.216990+00:00 app[web.1]: sent += sendfile(sockno, fileno, offset + sent, count)
2018-05-29T09:24:36.216992+00:00 app[web.1]: File "/app/.heroku/python/lib/python2.7/site-packages/gunicorn/http/_sendfile.py", line 66, in sendfile
2018-05-29T09:24:36.216994+00:00 app[web.1]: raise OSError(e, os.strerror(e))
2018-05-29T09:24:36.216996+00:00 app[web.1]: OSError: [Errno 11] Resource temporarily unavailable
these are the other versions in the requirements.txt:
Flask==0.12.2
gunicorn==19.8.1
pymongo==3.6.1
flask_socketio==2.9.6
flask_cors==3.0.3
eventlet==0.22.1
gevent==1.2.2
Changing gunicorn to 19.7.1 seems to resolve the problem; it persists with 19.8.0.
As with the similar problem from 2012, it’s not a request timeout issue as the error it throws is pretty immediate. Rolling back to 19.7.1 has fixed it, so for now I’ll stick with that, but it would be nice to use the latest version. Seems like this could be a Heroku-specific problem; I only noticed in the last month or so, but can’t find any information about when they changed versions.
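Since rolling back resolves it, the pin can be made explicit. A minimal sketch, assuming a standard requirements.txt-based Heroku deployment (not necessarily the poster's exact setup):
pip install "gunicorn==19.7.1"   # install the known-good version locally
pip freeze | grep -i gunicorn    # confirm the resolved pin before copying it into requirements.txt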
I fought with this same issue today, all day. And I think I finally fixed it. I’m using nginx, Flask, gunicorn w/ eventlet, and docker.
My (relevant) pip freeze
output:
eventlet==0.23.0
Flask==1.0.2
greenlet==0.4.14
gunicorn==19.9.0
My gunicorn command:
gunicorn -b 0.0.0.0:8000 --workers 1 --worker-class eventlet --log-level=DEBUG myapp.wsgi:app
The first symptom was large static file loads throwing ERR_CONTENT_LENGTH_MISMATCH in the browser. Obviously this broke the application, as large static JS libs weren't being loaded.
The second symptom was nginx logging the following to error.log: upstream prematurely closed connection while reading upstream
Finally, I traced it back to a gunicorn log item:
Socket error processing request. - [in /usr/local/lib/python2.7/dist-packages/gunicorn/glogging.py:277]
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/gunicorn/workers/base_async.py", line 66, in handle
six.reraise(*sys.exc_info())
File "/usr/local/lib/python2.7/dist-packages/gunicorn/workers/base_async.py", line 56, in handle
self.handle_request(listener_name, req, client, addr)
File "/usr/local/lib/python2.7/dist-packages/gunicorn/workers/base_async.py", line 129, in handle_request
six.reraise(*sys.exc_info())
File "/usr/local/lib/python2.7/dist-packages/gunicorn/workers/base_async.py", line 112, in handle_request
resp.write_file(respiter)
File "/usr/local/lib/python2.7/dist-packages/gunicorn/http/wsgi.py", line 403, in write_file
if not self.sendfile(respiter):
File "/usr/local/lib/python2.7/dist-packages/gunicorn/http/wsgi.py", line 393, in sendfile
sent += sendfile(sockno, fileno, offset + sent, count)
File "/usr/local/lib/python2.7/dist-packages/gunicorn/http/_sendfile.py", line 66, in sendfile
raise OSError(e, os.strerror(e))
OSError: [Errno 11] Resource temporarily unavailable
My eventual solution was to start gunicorn with the --no-sendfile flag, and the problem went away. Why? Not sure… I'm just happy it's working.
Also worth mentioning, during my troubleshooting, I did my best to have my nginx.conf resemble the example found here: http://docs.gunicorn.org/en/stable/deploy.html
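For reference, the same command with sendfile disabled would look like this; it simply appends the --no-sendfile flag mentioned above to the gunicorn invocation shown earlier (myapp.wsgi:app carried over from that example):
gunicorn -b 0.0.0.0:8000 --workers 1 --worker-class eventlet --no-sendfile --log-level=DEBUG myapp.wsgi:app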
I hit this issue too; 19.7.0 works fine.
Does this occur immediately or after a long response has been partially sent?
Maybe serving static files is the cause.
Any update on this? I am facing the same issue in 19.9.0
Everything was working fine and all of a sudden it started happening.
@tilgovi, let me know if you need any info regarding this. All of a sudden, this issue started showing up.
For me the problem was due to the eventlet worker. I removed eventlet and everything is fine now.
I hit the same problem, and the suggestion from @SaintSimmo works well for me. The problem occurs immediately when starting to download a large file. I am using Flask and eventlet, and the download is served with send_from_directory from Flask.
gunicorn is started with the following command:
gunicorn --worker-class eventlet -w 1 -b 0.0.0.0:4000 upload:app
which gives the error.
If --no-sendfile is added to the command, no error appears. If the download works without sendfile, when should sendfile be used?
Same issue with gunicorn (version 19.9.0) and eventlet.
Yep, same problem here; after removing the eventlet worker it's working fine.
When I start the server with this command (gunicorn -w 1 -k eventlet -b 127.0.0.1:5000 wsgi:app), I receive the exceptions below and my image is truncated in the client response.
[ERROR] Socket error processing request.
…
BlockingIOError: [Errno 11] Resource temporarily unavailable
Removing the worker class definition, it works:
gunicorn -w 1 -b 127.0.0.1:5000 wsgi:app
@jacebrowning and @SaintSimmo, I confirm that adding the --no-sendfile parameter to the gunicorn command is effective.
This is usually because nproc is set too low; you can increase the nproc limit for the user who runs that program by editing /etc/security/limits.conf.
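A minimal sketch of that change, assuming the program runs as a hypothetical user appuser and that 65535 is an acceptable limit (both values are illustrative, not prescriptive):
ulimit -u                        # show the current max user processes (nproc) for this shell
# add to /etc/security/limits.conf (illustrative values):
appuser soft nproc 65535
appuser hard nproc 65535
# log out and back in (or restart the service) so the new limit takes effect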
Are you suffering from this problem? If you are using Python 3.4 or above, update your Gunicorn version.
I had the same problem with --worker-class. I updated gunicorn to 20.0.4 and the problem was solved.
Changing the worker class to gevent worked for me: --worker-class gevent
Changing the worker class from gevent to sync worked for me.
BTW, I'm serving a DL model using PyTorch.
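For comparison, the two worker-class variants mentioned in the comments above would look like this, reusing the wsgi:app module path from the earlier commands (illustrative only):
gunicorn -w 1 --worker-class gevent -b 127.0.0.1:5000 wsgi:app   # gevent worker
gunicorn -w 1 --worker-class sync -b 127.0.0.1:5000 wsgi:app     # default sync worker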
It works for me, thank you.
import gevent.monkey
gevent.monkey.patch_all()
Removing those two lines works for me.
I am running a docker server on Arch Linux (kernel 4.3.3-2) with several containers. Since my last reboot, both the docker server and random programs within the containers crash with a message about not being able to create a thread, or (less often) to fork. The specific error message differs depending on the program, but most of them mention the specific error "Resource temporarily unavailable". See the end of this post for some example error messages.
Now there are plenty of people who have had this error message, and plenty of responses to them. What’s really frustrating is that everyone seems to be speculating how the issue could be resolved, but no one seems to point out how to identify which of the many possible causes for the problem is present.
I have collected these 5 possible causes for the error and how to verify that they are not present on my system (a consolidated set of check commands follows the list):
- There is a system-wide limit on the number of threads configured in /proc/sys/kernel/threads-max (source). In my case this is set to 60613.
- Every thread takes some space in the stack. The stack size limit is configured using ulimit -s (source). The limit for my shell used to be 8192, but I have increased it by putting * soft stack 32768 into /etc/security/limits.conf, so ulimit -s now returns 32768. I have also increased it for the docker process by putting LimitSTACK=33554432 into /etc/systemd/system/docker.service (source), and I verified that the limit applies by looking into /proc/<pid of docker>/limits and by running ulimit -s inside a docker container.
- Every thread takes some memory. A virtual memory limit is configured using ulimit -v. On my system it is set to unlimited, and 80% of my 3 GB of memory are free.
- There is a limit on the number of processes using ulimit -u. Threads count as processes in this case (source). On my system, the limit is set to 30306, and for the docker daemon and inside docker containers, the limit is 1048576. The number of currently running threads can be found by running ls -1d /proc/*/task/* | wc -l or by running ps -elfT | wc -l (source). On my system they are between 700 and 800.
- There is a limit on the number of open files, which according to some sources is also relevant when creating threads. The limit is configured using ulimit -n. On my system and inside docker, the limit is set to 1048576. The number of open files can be found using lsof | wc -l (source); on my system it is about 30000.
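A consolidated version of those checks, using exactly the commands listed above; run it as the affected user, and repeat it inside a container where relevant:
#!/bin/sh
# system-wide thread limit
cat /proc/sys/kernel/threads-max
# stack size, virtual memory, process and open-file limits for this user
ulimit -s
ulimit -v
ulimit -u
ulimit -n
# number of currently running threads
ps -elfT | wc -l
# number of currently open files
lsof | wc -l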
It looks like before the last reboot I was running kernel 4.2.5-1, now I’m running 4.3.3-2. Downgrading to 4.2.5-1 fixes all the problems. Other posts mentioning the problem are this and this. I have opened a bug report for Arch Linux.
What has changed in the kernel that could be causing this?
Here are some example error messages:
Crash dump was written to: erl_crash.dump
Failed to create aux thread
Jan 07 14:37:25 edeltraud docker[30625]: runtime/cgo: pthread_create failed: Resource temporarily unavailable
dpkg: unrecoverable fatal error, aborting:
fork failed: Resource temporarily unavailable
E: Sub-process /usr/bin/dpkg returned an error code (2)
test -z "/usr/include" || /usr/sbin/mkdir -p "/tmp/lib32-popt/pkg/lib32-popt/usr/include"
/bin/sh: fork: retry: Resource temporarily unavailable
/usr/bin/install -c -m 644 popt.h '/tmp/lib32-popt/pkg/lib32-popt/usr/include'
test -z "/usr/share/man/man3" || /usr/sbin/mkdir -p "/tmp/lib32-popt/pkg/lib32-popt/usr/share/man/man3"
/bin/sh: fork: retry: Resource temporarily unavailable
/bin/sh: fork: retry: No child processes
/bin/sh: fork: retry: Resource temporarily unavailable
/bin/sh: fork: retry: No child processes
/bin/sh: fork: retry: No child processes
/bin/sh: fork: retry: Resource temporarily unavailable
/bin/sh: fork: retry: Resource temporarily unavailable
/bin/sh: fork: retry: No child processes
/bin/sh: fork: Resource temporarily unavailable
/bin/sh: fork: Resource temporarily unavailable
make[3]: *** [install-man3] Error 254
Jan 07 11:04:39 edeltraud docker[780]: time="2016-01-07T11:04:39.986684617+01:00" level=error msg="Error running container: [8] System error: fork/exec /proc/self/exe: resource temporarily unavailable"
[Wed Jan 06 23:20:33.701287 2016] [mpm_event:alert] [pid 217:tid 140325422335744] (11)Resource temporarily unavailable: apr_thread_create: unable to create worker thread