Commit dfa8e6f5e49344b3cf9d1469bbd817e58a8e669e
Committed by Joenio Costa
1 parent f619be45
Exists in master and in 28 other branches

Invitation e-mails are sent in background

Added delayed_job plugin. Added a daemon to start/stop delayed_job. Added invitations to a queue to be processed later. (ActionItem1640)

Showing 55 changed files with 2622 additions and 17 deletions
app/controllers/public/invite_controller.rb
... | ... | @@ -21,8 +21,8 @@ class InviteController < PublicController |
21 | 21 | if !params[:mail_template].match(/<url>/) |
22 | 22 | flash.now[:notice] = _('<url> is needed in invitation mail.') |
23 | 23 | elsif !contacts_to_invite.empty? |
24 | - Invitation.invite(current_user.person, contacts_to_invite, params[:mail_template], profile) | |
25 | - session[:notice] = _('Your invitations have been sent.') | |
24 | + Delayed::Job.enqueue InvitationJob.new(current_user.person, contacts_to_invite, params[:mail_template], profile) | |
25 | + session[:notice] = _('Your invitations are being sent.') | |
26 | 26 | if profile.person? |
27 | 27 | redirect_to :controller => 'friends' |
28 | 28 | else | ... | ... |
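The InvitationJob enqueued above is defined elsewhere in this changeset and is not shown in this excerpt. A minimal sketch of what such a job could look like, assuming it simply delegates to the Invitation.invite call the controller used before (the delegation is an assumption, not confirmed by this excerpt):

    # Sketch only -- the real InvitationJob ships in another file of this commit.
    # delayed_job serializes the struct to YAML; a worker later calls #perform.
    class InvitationJob < Struct.new(:person, :contacts_to_invite, :mail_template, :profile)
      def perform
        Invitation.invite(person, contacts_to_invite, mail_template, profile)
      end
    end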
... | ... | @@ -0,0 +1,21 @@ |
1 | +class CreateDelayedJobs < ActiveRecord::Migration | |
2 | + def self.up | |
3 | + create_table :delayed_jobs, :force => true do |table| | |
4 | + table.integer :priority, :default => 0 # Allows some jobs to jump to the front of the queue | |
5 | + table.integer :attempts, :default => 0 # Provides for retries, but still fail eventually. | |
6 | + table.text :handler # YAML-encoded string of the object that will do work | |
7 | + table.text :last_error # reason for last failure (See Note below) | |
8 | + table.datetime :run_at # When to run. Could be Time.zone.now for immediately, or sometime in the future. | |
9 | + table.datetime :locked_at # Set when a client is working on this object | |
10 | + table.datetime :failed_at # Set when all retries have failed (actually, by default, the record is deleted instead) | |
11 | + table.string :locked_by # Who is working on this object (if locked) | |
12 | + table.timestamps | |
13 | + end | |
14 | + | |
15 | + add_index :delayed_jobs, [:priority, :run_at], :name => 'delayed_jobs_priority' | |
16 | + end | |
17 | + | |
18 | + def self.down | |
19 | + drop_table :delayed_jobs | |
20 | + end | |
21 | +end | ... | ... |
db/schema.rb
... | ... | @@ -9,7 +9,7 @@ |
9 | 9 | # |
10 | 10 | # It's strongly recommended to check this file into your version control system. |
11 | 11 | |
12 | -ActiveRecord::Schema.define(:version => 20100811211216) do | |
12 | +ActiveRecord::Schema.define(:version => 20100820153354) do | |
13 | 13 | |
14 | 14 | create_table "article_versions", :force => true do |t| |
15 | 15 | t.integer "article_id" |
... | ... | @@ -160,6 +160,21 @@ ActiveRecord::Schema.define(:version => 20100811211216) do |
160 | 160 | t.datetime "created_at" |
161 | 161 | end |
162 | 162 | |
163 | + create_table "delayed_jobs", :force => true do |t| | |
164 | + t.integer "priority", :default => 0 | |
165 | + t.integer "attempts", :default => 0 | |
166 | + t.text "handler" | |
167 | + t.text "last_error" | |
168 | + t.datetime "run_at" | |
169 | + t.datetime "locked_at" | |
170 | + t.datetime "failed_at" | |
171 | + t.string "locked_by" | |
172 | + t.datetime "created_at" | |
173 | + t.datetime "updated_at" | |
174 | + end | |
175 | + | |
176 | + add_index "delayed_jobs", ["priority", "run_at"], :name => "delayed_jobs_priority" | |
177 | + | |
163 | 178 | create_table "domains", :force => true do |t| |
164 | 179 | t.string "name" |
165 | 180 | t.string "owner_type" | ... | ... |
features/invitation.feature
... | ... | @@ -91,12 +91,14 @@ Feature: invitation |
91 | 91 | And I fill in "manual_import_addresses" with "santos@invalid.br" |
92 | 92 | And I fill in "mail_template" with "Follow this link <url>" |
93 | 93 | And I press "Invite my friends!" |
94 | + Given there are no pending jobs | |
94 | 95 | When I am logged in as "josesantos" |
95 | 96 | And I go to the Control panel |
96 | 97 | And I should see "josesilva invites you to join the community 26 Bsslines." |
97 | 98 | |
98 | 99 | Scenario: noosfero user accepts to join community |
99 | 100 | Given I invite email "santos@invalid.br" to join community "26 Bsslines" |
101 | + And there are no pending jobs | |
100 | 102 | When I am logged in as "josesantos" |
101 | 103 | And I go to the Control panel |
102 | 104 | And I follow "Process requests" |
... | ... | @@ -110,6 +112,7 @@ Feature: invitation |
110 | 112 | |
111 | 113 | Scenario: noosfero user rejects to join community |
112 | 114 | Given I invite email "santos@invalid.br" to join community "26 Bsslines" |
115 | + And there are no pending jobs | |
113 | 116 | When I am logged in as "josesantos" |
114 | 117 | And I go to the Control panel |
115 | 118 | And I follow "Process requests" |
... | ... | @@ -129,12 +132,14 @@ Feature: invitation |
129 | 132 | And I fill in "manual_import_addresses" with "santos@invalid.br" |
130 | 133 | And I fill in "mail_template" with "Follow this link <url>" |
131 | 134 | And I press "Invite my friends!" |
135 | + Given there are no pending jobs | |
132 | 136 | When I am logged in as "josesantos" |
133 | 137 | And I go to the Control panel |
134 | 138 | And I should see "josesilva wants to be your friend." |
135 | 139 | |
136 | 140 | Scenario: noosfero user accepts to be friend |
137 | 141 | Given I invite email "santos@invalid.br" to be my friend |
142 | + And there are no pending jobs | |
138 | 143 | When I am logged in as "josesantos" |
139 | 144 | And I go to the Control panel |
140 | 145 | And I follow "Process requests" |
... | ... | @@ -148,6 +153,7 @@ Feature: invitation |
148 | 153 | |
149 | 154 | Scenario: noosfero user rejects to be friend |
150 | 155 | Given I invite email "santos@invalid.br" to be my friend |
156 | + And there are no pending jobs | |
151 | 157 | When I am logged in as "josesantos" |
152 | 158 | And I go to the Control panel |
153 | 159 | And I follow "Process requests" | ... | ... |
features/step_definitions/invitation_steps.rb
... | ... | @@ -17,3 +17,7 @@ Given /^I invite email "(.+)" to be my friend$/ do |email| |
17 | 17 | fill_in('mail_template', :with => 'Follow this link <url>') |
18 | 18 | click_button("Invite my friends!") |
19 | 19 | end |
20 | + | |
21 | +Given /^there are no pending jobs$/ do | |
22 | + Delayed::Worker.new.work_off | |
23 | +end | ... | ... |
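Because invitations are now only enqueued during the request, each scenario has to drain the queue before it can observe the results; Delayed::Worker.new.work_off processes every pending job synchronously in the calling process. A hypothetical standalone illustration of the same call (not part of the commit):

    # Hypothetical sketch: any object responding to #perform can be enqueued,
    # and work_off runs all pending jobs inline (e.g. in a test or console).
    class NoopJob
      def perform; end
    end

    Delayed::Job.enqueue NoopJob.new
    Delayed::Worker.new.work_off   # runs NoopJob#perform along with any other pending jobs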
... | ... | @@ -0,0 +1,33 @@ |
1 | +#!/usr/bin/env ruby | |
2 | + | |
3 | +## This is the Noosfero delayed job controller script. It starts and stops the | |
4 | +# delayed job daemon, which is implemented in the DelayedJob plugin. | |
5 | +# | |
6 | +# The role of this script is to just start/stop the daemon, write a PID file, | |
7 | +# etc. The actual job processing logic is in the DelayedJob plugin. | |
8 | + | |
9 | +require 'daemons' | |
10 | + | |
11 | +NOOSFERO_ROOT = File.expand_path(File.dirname(__FILE__) + '/../') | |
12 | + | |
13 | +options = { | |
14 | + :dir_mode => :normal, | |
15 | + :dir => File.dirname(__FILE__) + '/../tmp/pids', | |
16 | + :multiple => false, | |
17 | + :backtrace => true, | |
18 | + :monitor => true, | |
19 | +} | |
20 | + | |
21 | +Daemons.run_proc('job_runner', options) do | |
22 | + if ARGV.include?('--') | |
23 | + ARGV.slice! 0..ARGV.index('--') | |
24 | + else | |
25 | + ARGV.clear | |
26 | + end | |
27 | + | |
28 | + Dir.chdir NOOSFERO_ROOT | |
29 | + RAILS_ENV = ARGV.first || ENV['RAILS_ENV'] || 'development' | |
30 | + require NOOSFERO_ROOT + '/config/environment' | |
31 | + | |
32 | + Delayed::Worker.new.start | |
33 | +end | ... | ... |
script/production
... | ... | @@ -21,9 +21,11 @@ do_start() { |
21 | 21 | ./script/ferret_server -e $RAILS_ENV start |
22 | 22 | ./script/feed-updater start |
23 | 23 | mongrel_rails cluster::start |
24 | + ./script/delayed_job start | |
24 | 25 | } |
25 | 26 | |
26 | 27 | do_stop() { |
28 | + ./script/delayed_job stop | |
27 | 29 | mongrel_rails cluster::stop |
28 | 30 | ./script/feed-updater stop |
29 | 31 | ./script/ferret_server -e $RAILS_ENV stop | ... | ... |
test/functional/invite_controller_test.rb
... | ... | @@ -10,48 +10,83 @@ class InviteControllerTest < ActionController::TestCase |
10 | 10 | end |
11 | 11 | attr_accessor :profile, :friend, :community |
12 | 12 | |
13 | - should 'actually invite manually added address with friend object' do | |
14 | - assert_difference InviteFriend, :count, 1 do | |
13 | + should 'queue invitation of a manually added address with friend object and process it later' do | |
14 | + assert_difference Delayed::Job, :count, 1 do | |
15 | 15 | post :friends, :profile => profile.identifier, :manual_import_addresses => "#{friend.name} <#{friend.email}>", :import_from => "manual", :mail_template => "click: <url>", :step => 2 |
16 | 16 | assert_redirected_to :controller => 'friends' |
17 | 17 | end |
18 | - end | |
19 | 18 | |
20 | - should 'actually invite manually added address with only e-mail' do | |
21 | 19 | assert_difference InviteFriend, :count, 1 do |
22 | - post :friends, :profile => profile.identifier, :manual_import_addresses => "test@test.com", :import_from => "manual", :mail_template => "click: <url>", :step => 2 | |
20 | + Delayed::Worker.new.work_off | |
23 | 21 | end |
24 | 22 | end |
25 | 23 | |
26 | - should 'actually invite manually added addresses with e-mail and other format' do | |
24 | + should 'queue invitation of a manually added address with only an e-mail and process it later' do | |
25 | + assert_difference Delayed::Job, :count, 1 do | |
26 | + post :friends, :profile => profile.identifier, :manual_import_addresses => "test@test.com", :import_from => "manual", :mail_template => "click: <url>", :step => 2 | |
27 | + assert_redirected_to :controller => 'friends' | |
28 | + end | |
29 | + | |
27 | 30 | assert_difference InviteFriend, :count, 1 do |
31 | + Delayed::Worker.new.work_off | |
32 | + end | |
33 | + end | |
34 | + | |
35 | + should 'queue invitation of a manually added address with e-mail in another format and process it later' do | |
36 | + assert_difference Delayed::Job, :count, 1 do | |
28 | 37 | post :friends, :profile => profile.identifier, :manual_import_addresses => "test@test.cz.com", :import_from => "manual", :mail_template => "click: <url>", :step => 2 |
38 | + assert_redirected_to :controller => 'friends' | |
39 | + end | |
40 | + | |
41 | + assert_difference InviteFriend, :count, 1 do | |
42 | + Delayed::Worker.new.work_off | |
29 | 43 | end |
30 | 44 | end |
31 | 45 | |
32 | - should 'actually invite more than one manually added address' do | |
33 | - assert_difference InviteFriend, :count, 2 do | |
46 | + should 'queue invitations for more than one manually added address and process them later' do | |
47 | + assert_difference Delayed::Job, :count, 1 do | |
34 | 48 | post :friends, :profile => profile.identifier, :manual_import_addresses => "Some Friend <somefriend@email.com>\r\notherperson@bleble.net\r\n", :import_from => "manual", :mail_template => "click: <url>", :step => 2 |
49 | + assert_redirected_to :controller => 'friends' | |
50 | + end | |
51 | + | |
52 | + assert_difference InviteFriend, :count, 2 do | |
53 | + Delayed::Worker.new.work_off | |
35 | 54 | end |
36 | 55 | end |
37 | 56 | |
38 | - should 'actualy invite manually added addresses with name and e-mail' do | |
39 | - assert_difference InviteFriend, :count, 1 do | |
57 | + should 'queue invitation of a manually added address with name and e-mail and process it later' do | |
58 | + assert_difference Delayed::Job, :count, 1 do | |
40 | 59 | post :friends, :profile => profile.identifier, :manual_import_addresses => "Test Name <test@test.com>", :import_from => "manual", :mail_template => "click: <url>", :step => 2 |
60 | + assert_redirected_to :controller => 'friends' | |
61 | + end | |
62 | + | |
63 | + assert_difference InviteFriend, :count, 1 do | |
64 | + Delayed::Worker.new.work_off | |
41 | 65 | end |
42 | 66 | end |
43 | 67 | |
44 | - should 'not invite yourself' do | |
45 | - assert_no_difference InviteFriend, :count do | |
68 | + should 'queue invitation of yourself but not create an invitation when processed' do | |
69 | + assert_difference Delayed::Job, :count, 1 do | |
46 | 70 | post :friends, :profile => profile.identifier, :manual_import_addresses => "#{profile.name} <#{profile.user.email}>", :import_from => "manual", :mail_template => "click: <url>", :step => 2 |
71 | + assert_redirected_to :controller => 'friends' | |
72 | + end | |
73 | + | |
74 | + assert_no_difference InviteFriend, :count do | |
75 | + Delayed::Worker.new.work_off | |
47 | 76 | end |
48 | 77 | end |
49 | 78 | |
50 | - should 'not invite if already a friend' do | |
79 | + should 'queue invitation of an existing friend but not create an invitation when processed' do | |
51 | 80 | friend = create_user('testfriend', :email => 'friend@noosfero.org') |
52 | 81 | friend.person.add_friend(profile) |
53 | - assert_no_difference InviteFriend, :count do | |
82 | + | |
83 | + assert_difference Delayed::Job, :count, 1 do | |
54 | 84 | post :friends, :profile => profile.identifier, :manual_import_addresses => "#{friend.name} <#{friend.email}>", :import_from => "manual", :mail_template => "click: <url>", :step => 2 |
85 | + assert_redirected_to :controller => 'friends' | |
86 | + end | |
87 | + | |
88 | + assert_no_difference InviteFriend, :count do | |
89 | + Delayed::Worker.new.work_off | |
55 | 90 | end |
56 | 91 | end |
57 | 92 | ... | ... |
... | ... | @@ -0,0 +1,20 @@ |
1 | +Copyright (c) 2005 Tobias Luetke | |
2 | + | |
3 | +Permission is hereby granted, free of charge, to any person obtaining | |
4 | +a copy of this software and associated documentation files (the | |
5 | +"Software"), to deal in the Software without restriction, including | |
6 | +without limitation the rights to use, copy, modify, merge, publish, | |
7 | +distribute, sublicense, and/or sell copies of the Software, and to | |
8 | +permit persons to whom the Software is furnished to do so, subject to | |
9 | +the following conditions: | |
10 | + | |
11 | +The above copyright notice and this permission notice shall be | |
12 | +included in all copies or substantial portions of the Software. | |
13 | + | |
14 | +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
15 | +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
16 | +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
17 | +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE | |
18 | +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | |
19 | +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | |
20 | +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | |
0 | 21 | \ No newline at end of file | ... | ... |
... | ... | @@ -0,0 +1,210 @@ |
1 | +h1. Delayed::Job | |
2 | + | |
3 | +Delayed_job (or DJ) encapsulates the common pattern of asynchronously executing longer tasks in the background. | |
4 | + | |
5 | +It is a direct extraction from Shopify where the job table is responsible for a multitude of core tasks. Amongst those tasks are: | |
6 | + | |
7 | +* sending massive newsletters | |
8 | +* image resizing | |
9 | +* http downloads | |
10 | +* updating smart collections | |
11 | +* updating solr, our search server, after product changes | |
12 | +* batch imports | |
13 | +* spam checks | |
14 | + | |
15 | +h2. Installation | |
16 | + | |
17 | +To install as a gem, add the following to @config/environment.rb@: | |
18 | + | |
19 | +<pre> | |
20 | +config.gem 'delayed_job' | |
21 | +</pre> | |
22 | + | |
23 | +Rake tasks are not automatically loaded from gems, so you'll need to add the following to your Rakefile: | |
24 | + | |
25 | +<pre> | |
26 | +begin | |
27 | + require 'delayed/tasks' | |
28 | +rescue LoadError | |
29 | + STDERR.puts "Run `rake gems:install` to install delayed_job" | |
30 | +end | |
31 | +</pre> | |
32 | + | |
33 | +To install as a plugin: | |
34 | + | |
35 | +<pre> | |
36 | +script/plugin install git://github.com/collectiveidea/delayed_job.git | |
37 | +</pre> | |
38 | + | |
39 | +After delayed_job is installed, you will need to setup the backend. | |
40 | + | |
41 | +h2. Backends | |
42 | + | |
43 | +delayed_job supports multiple backends for storing the job queue. There are currently implementations for Active Record, MongoMapper, and DataMapper. | |
44 | + | |
45 | +h3. Active Record | |
46 | + | |
47 | +The default is Active Record, which requires a jobs table. | |
48 | + | |
49 | +<pre> | |
50 | +$ script/generate delayed_job | |
51 | +$ rake db:migrate | |
52 | +</pre> | |
53 | + | |
54 | +h3. MongoMapper | |
55 | + | |
56 | +You must use @MongoMapper.setup@ in the initializer: | |
57 | + | |
58 | +<pre> | |
59 | +config = YAML::load(File.read(Rails.root.join('config/mongo.yml'))) | |
60 | +MongoMapper.setup(config, Rails.env) | |
61 | + | |
62 | +Delayed::Worker.backend = :mongo_mapper | |
63 | +</pre> | |
64 | + | |
65 | +h3. DataMapper | |
66 | + | |
67 | +<pre> | |
68 | +# config/initializers/delayed_job.rb | |
69 | +Delayed::Worker.backend = :data_mapper | |
70 | +Delayed::Worker.backend.auto_upgrade! | |
71 | +</pre> | |
72 | + | |
73 | +h2. Queuing Jobs | |
74 | + | |
75 | +Call @.delay.method(params)@ on any object and it will be processed in the background. | |
76 | + | |
77 | +<pre> | |
78 | +# without delayed_job | |
79 | +Notifier.deliver_signup(@user) | |
80 | + | |
81 | +# with delayed_job | |
82 | +Notifier.delay.deliver_signup @user | |
83 | +</pre> | |
84 | + | |
85 | +If a method should always be run in the background, you can call @#handle_asynchronously@ after the method declaration: | |
86 | + | |
87 | +<pre> | |
88 | +class Device | |
89 | + def deliver | |
90 | + # long running method | |
91 | + end | |
92 | + handle_asynchronously :deliver | |
93 | +end | |
94 | + | |
95 | +device = Device.new | |
96 | +device.deliver | |
97 | +</pre> | |
98 | + | |
99 | +h2. Running Jobs | |
100 | + | |
101 | +@script/delayed_job@ can be used to manage a background process which will start working off jobs. Make sure you've run `script/generate delayed_job`. | |
102 | + | |
103 | +<pre> | |
104 | +$ RAILS_ENV=production script/delayed_job start | |
105 | +$ RAILS_ENV=production script/delayed_job stop | |
106 | + | |
107 | +# Runs two workers in separate processes. | |
108 | +$ RAILS_ENV=production script/delayed_job -n 2 start | |
109 | +$ RAILS_ENV=production script/delayed_job stop | |
110 | +</pre> | |
111 | + | |
112 | +Workers can be running on any computer, as long as they have access to the database and their clock is in sync. Keep in mind that each worker will check the database at least every 5 seconds. | |
113 | + | |
114 | +You can also invoke @rake jobs:work@ which will start working off jobs. You can cancel the rake task with @CTRL-C@. | |
115 | + | |
116 | +h2. Custom Jobs | |
117 | + | |
118 | +Jobs are simple ruby objects with a method called perform. Any object which responds to perform can be stuffed into the jobs table. Job objects are serialized to yaml so that they can later be resurrected by the job runner. | |
119 | + | |
120 | +<pre> | |
121 | +class NewsletterJob < Struct.new(:text, :emails) | |
122 | + def perform | |
123 | + emails.each { |e| NewsletterMailer.deliver_text_to_email(text, e) } | |
124 | + end | |
125 | +end | |
126 | + | |
127 | +Delayed::Job.enqueue NewsletterJob.new('lorem ipsum...', Customers.find(:all).collect(&:email)) | |
128 | +</pre> | |
129 | + | |
130 | +You can also add an optional on_permanent_failure method which will run if the job has failed too many times to be retried: | |
131 | + | |
132 | +<pre> | |
133 | +class ParanoidNewsletterJob < NewsletterJob | |
134 | + def perform | |
135 | + emails.each { |e| NewsletterMailer.deliver_text_to_email(text, e) } | |
136 | + end | |
137 | + | |
138 | + def on_permanent_failure | |
139 | + page_sysadmin_in_the_middle_of_the_night | |
140 | + end | |
141 | +end | |
142 | +</pre> | |
143 | + | |
144 | +h2. Gory Details | |
145 | + | |
146 | +The library revolves around a delayed_jobs table which looks as follows: | |
147 | + | |
148 | +<pre> | |
149 | +create_table :delayed_jobs, :force => true do |table| | |
150 | + table.integer :priority, :default => 0 # Allows some jobs to jump to the front of the queue | |
151 | + table.integer :attempts, :default => 0 # Provides for retries, but still fail eventually. | |
152 | + table.text :handler # YAML-encoded string of the object that will do work | |
153 | + table.text :last_error # reason for last failure (See Note below) | |
154 | + table.datetime :run_at # When to run. Could be Time.zone.now for immediately, or sometime in the future. | |
155 | + table.datetime :locked_at # Set when a client is working on this object | |
156 | + table.datetime :failed_at # Set when all retries have failed (actually, by default, the record is deleted instead) | |
157 | + table.string :locked_by # Who is working on this object (if locked) | |
158 | + table.timestamps | |
159 | +end | |
160 | +</pre> | |
161 | + | |
162 | +On failure, the job is scheduled again in 5 seconds + N ** 4, where N is the number of retries. | |
163 | + | |
164 | +The default Worker.max_attempts is 25. After this, the job is either deleted (default) or left in the database with "failed_at" set. | |
165 | +With the default of 25 attempts, the last retry will be 20 days later, with the last interval being almost 100 hours. | |
166 | + | |
167 | +The default Worker.max_run_time is 4.hours. If your job takes longer than that, another computer could pick it up. It's up to you to | |
168 | +make sure your job doesn't exceed this time. You should set this to the longest time you think the job could take. | |
169 | + | |
170 | +By default, it will delete failed jobs (and it always deletes successful jobs). If you want to keep failed jobs, set | |
171 | +Delayed::Worker.destroy_failed_jobs = false. The failed jobs will be marked with non-null failed_at. | |
172 | + | |
173 | +Here is an example of changing job parameters in Rails: | |
174 | + | |
175 | +<pre> | |
176 | +# config/initializers/delayed_job_config.rb | |
177 | +Delayed::Worker.destroy_failed_jobs = false | |
178 | +Delayed::Worker.sleep_delay = 60 | |
179 | +Delayed::Worker.max_attempts = 3 | |
180 | +Delayed::Worker.max_run_time = 5.minutes | |
181 | +</pre> | |
182 | + | |
183 | +h3. Cleaning up | |
184 | + | |
185 | +You can invoke @rake jobs:clear@ to delete all jobs in the queue. | |
186 | + | |
187 | +h2. Mailing List | |
188 | + | |
189 | +Join us on the mailing list at http://groups.google.com/group/delayed_job | |
190 | + | |
191 | +h2. How to contribute | |
192 | + | |
193 | +If you find what looks like a bug: | |
194 | + | |
195 | +# Check the GitHub issue tracker to see if anyone else has had the same issue. | |
196 | + http://github.com/collectiveidea/delayed_job/issues/ | |
197 | +# If you don't see anything, create an issue with information on how to reproduce it. | |
198 | + | |
199 | +If you want to contribute an enhancement or a fix: | |
200 | + | |
201 | +# Fork the project on github. | |
202 | + http://github.com/collectiveidea/delayed_job/ | |
203 | +# Make your changes with tests. | |
204 | +# Commit the changes without making changes to the Rakefile, VERSION, or any other files that aren't related to your enhancement or fix | |
205 | +# Send a pull request. | |
206 | + | |
207 | +h3. Changelog | |
208 | + | |
209 | +See http://wiki.github.com/collectiveidea/delayed_job/changelog for a list of changes. | |
210 | + | ... | ... |
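To make the backoff quoted in the README above concrete, here is a worked illustration of "5 seconds + N ** 4" with the default 25 attempts (the numbers below are derived from that formula, not taken from the plugin):

    # After the Nth failure the job is rescheduled 5 + N**4 seconds later.
    last_interval = 5 + 24**4                              # ~331,781 s, roughly 92 hours ("almost 100 hours")
    total = (1..24).inject(0) { |sum, n| sum + 5 + n**4 }  # ~1,763,140 s until the 25th and final attempt
    puts total / 86_400.0                                  # => ~20.4 days ("20 days later")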
... | ... | @@ -0,0 +1,53 @@ |
1 | +# -*- encoding: utf-8 -*- | |
2 | +begin | |
3 | + require 'jeweler' | |
4 | +rescue LoadError | |
5 | + puts "Jeweler not available. Install it with: sudo gem install jeweler" | |
6 | + exit 1 | |
7 | +end | |
8 | + | |
9 | +Jeweler::Tasks.new do |s| | |
10 | + s.name = "delayed_job" | |
11 | + s.summary = "Database-backed asynchronous priority queue system -- Extracted from Shopify" | |
12 | + s.email = "tobi@leetsoft.com" | |
13 | + s.homepage = "http://github.com/collectiveidea/delayed_job" | |
14 | + s.description = "Delayed_job (or DJ) encapsulates the common pattern of asynchronously executing longer tasks in the background. It is a direct extraction from Shopify where the job table is responsible for a multitude of core tasks.\n\nThis gem is collectiveidea's fork (http://github.com/collectiveidea/delayed_job)." | |
15 | + s.authors = ["Brandon Keepers", "Tobias Lütke"] | |
16 | + | |
17 | + s.has_rdoc = true | |
18 | + s.rdoc_options = ["--main", "README.textile", "--inline-source", "--line-numbers"] | |
19 | + s.extra_rdoc_files = ["README.textile"] | |
20 | + | |
21 | + s.test_files = Dir['spec/*_spec.rb'] | |
22 | + | |
23 | + s.add_dependency "daemons" | |
24 | + s.add_development_dependency "rspec" | |
25 | + s.add_development_dependency "sqlite3-ruby" | |
26 | + s.add_development_dependency "activerecord" | |
27 | + s.add_development_dependency "mongo_mapper" | |
28 | + s.add_development_dependency "dm-core" | |
29 | + s.add_development_dependency "dm-observer" | |
30 | + s.add_development_dependency "dm-aggregates" | |
31 | + s.add_development_dependency "dm-validations" | |
32 | + s.add_development_dependency "do_sqlite3" | |
33 | + s.add_development_dependency "couchrest" | |
34 | +end | |
35 | + | |
36 | +require 'spec/rake/spectask' | |
37 | + | |
38 | + | |
39 | +task :default do | |
40 | + %w(2.3.5 3.0.0.beta3).each do |version| | |
41 | + puts "Running specs with Rails #{version}" | |
42 | + system("RAILS_VERSION=#{version} rake -s spec;") | |
43 | + end | |
44 | +end | |
45 | + | |
46 | +desc 'Run the specs' | |
47 | +Spec::Rake::SpecTask.new(:spec) do |t| | |
48 | + t.libs << 'lib' | |
49 | + t.pattern = 'spec/*_spec.rb' | |
50 | + t.verbose = true | |
51 | +end | |
52 | +task :spec => :check_dependencies | |
53 | + | ... | ... |
... | ... | @@ -0,0 +1 @@ |
1 | +2.1.0.pre | ... | ... |
... | ... | @@ -0,0 +1,33 @@ |
1 | +$:.unshift(File.dirname(__FILE__) + '/lib') | |
2 | +require 'rubygems' | |
3 | +require 'logger' | |
4 | +require 'delayed_job' | |
5 | +require 'benchmark' | |
6 | + | |
7 | +RAILS_ENV = 'test' | |
8 | + | |
9 | +Delayed::Worker.logger = Logger.new('/dev/null') | |
10 | + | |
11 | +BACKENDS = [] | |
12 | +Dir.glob("#{File.dirname(__FILE__)}/spec/setup/*.rb") do |backend| | |
13 | + begin | |
14 | + backend = File.basename(backend, '.rb') | |
15 | + require "spec/setup/#{backend}" | |
16 | + BACKENDS << backend.to_sym | |
17 | + rescue LoadError | |
18 | + puts "Unable to load #{backend} backend! #{$!}" | |
19 | + end | |
20 | +end | |
21 | + | |
22 | + | |
23 | +Benchmark.bm(10) do |x| | |
24 | + BACKENDS.each do |backend| | |
25 | + require "spec/setup/#{backend}" | |
26 | + Delayed::Worker.backend = backend | |
27 | + | |
28 | + n = 10000 | |
29 | + n.times { "foo".delay.length } | |
30 | + | |
31 | + x.report(backend.to_s) { Delayed::Worker.new(:quiet => true).work_off(n) } | |
32 | + end | |
33 | +end | ... | ... |
... | ... | @@ -0,0 +1,14 @@ |
1 | +# an example Monit configuration file for delayed_job | |
2 | +# See: http://stackoverflow.com/questions/1226302/how-to-monitor-delayedjob-with-monit/1285611 | |
3 | +# | |
4 | +# To use: | |
5 | +# 1. copy to /var/www/apps/{app_name}/shared/delayed_job.monitrc | |
6 | +# 2. replace {app_name} as appropriate | |
7 | +# 3. add this to your /etc/monit/monitrc | |
8 | +# | |
9 | +# include /var/www/apps/{app_name}/shared/delayed_job.monitrc | |
10 | + | |
11 | +check process delayed_job | |
12 | + with pidfile /var/www/apps/{app_name}/shared/pids/delayed_job.pid | |
13 | + start program = "/usr/bin/env RAILS_ENV=production /var/www/apps/{app_name}/current/script/delayed_job start" | |
14 | + stop program = "/usr/bin/env RAILS_ENV=production /var/www/apps/{app_name}/current/script/delayed_job stop" | |
0 | 15 | \ No newline at end of file | ... | ... |
vendor/plugins/delayed_job/contrib/delayed_job_multiple.monitrc
0 → 100644
... | ... | @@ -0,0 +1,23 @@ |
1 | +# an example Monit configuration file for delayed_job running multiple processes | |
2 | +# | |
3 | +# To use: | |
4 | +# 1. copy to /var/www/apps/{app_name}/shared/delayed_job.monitrc | |
5 | +# 2. replace {app_name} as appropriate | |
6 | +# 3. add this to your /etc/monit/monitrc | |
7 | +# | |
8 | +# include /var/www/apps/{app_name}/shared/delayed_job.monitrc | |
9 | + | |
10 | +check process delayed_job_0 | |
11 | + with pidfile /var/www/apps/{app_name}/shared/pids/delayed_job.0.pid | |
12 | + start program = "/usr/bin/env RAILS_ENV=production /var/www/apps/{app_name}/current/script/delayed_job start -i 0" | |
13 | + stop program = "/usr/bin/env RAILS_ENV=production /var/www/apps/{app_name}/current/script/delayed_job stop -i 0" | |
14 | + | |
15 | +check process delayed_job_1 | |
16 | + with pidfile /var/www/apps/{app_name}/shared/pids/delayed_job.1.pid | |
17 | + start program = "/usr/bin/env RAILS_ENV=production /var/www/apps/{app_name}/current/script/delayed_job start -i 1" | |
18 | + stop program = "/usr/bin/env RAILS_ENV=production /var/www/apps/{app_name}/current/script/delayed_job stop -i 1" | |
19 | + | |
20 | +check process delayed_job_2 | |
21 | + with pidfile /var/www/apps/{app_name}/shared/pids/delayed_job.2.pid | |
22 | + start program = "/usr/bin/env RAILS_ENV=production /var/www/apps/{app_name}/current/script/delayed_job start -i 2" | |
23 | + stop program = "/usr/bin/env RAILS_ENV=production /var/www/apps/{app_name}/current/script/delayed_job stop -i 2" | |
0 | 24 | \ No newline at end of file | ... | ... |
... | ... | @@ -0,0 +1,125 @@ |
1 | +# Generated by jeweler | |
2 | +# DO NOT EDIT THIS FILE DIRECTLY | |
3 | +# Instead, edit Jeweler::Tasks in Rakefile, and run the gemspec command | |
4 | +# -*- encoding: utf-8 -*- | |
5 | + | |
6 | +Gem::Specification.new do |s| | |
7 | + s.name = %q{delayed_job} | |
8 | + s.version = "2.1.0.pre" | |
9 | + | |
10 | + s.required_rubygems_version = Gem::Requirement.new("> 1.3.1") if s.respond_to? :required_rubygems_version= | |
11 | + s.authors = ["Brandon Keepers", "Tobias L\303\274tke"] | |
12 | + s.date = %q{2010-05-21} | |
13 | + s.description = %q{Delayed_job (or DJ) encapsulates the common pattern of asynchronously executing longer tasks in the background. It is a direct extraction from Shopify where the job table is responsible for a multitude of core tasks. | |
14 | + | |
15 | +This gem is collectiveidea's fork (http://github.com/collectiveidea/delayed_job).} | |
16 | + s.email = %q{tobi@leetsoft.com} | |
17 | + s.extra_rdoc_files = [ | |
18 | + "README.textile" | |
19 | + ] | |
20 | + s.files = [ | |
21 | + ".gitignore", | |
22 | + "MIT-LICENSE", | |
23 | + "README.textile", | |
24 | + "Rakefile", | |
25 | + "VERSION", | |
26 | + "benchmarks.rb", | |
27 | + "contrib/delayed_job.monitrc", | |
28 | + "contrib/delayed_job_multiple.monitrc", | |
29 | + "delayed_job.gemspec", | |
30 | + "generators/delayed_job/delayed_job_generator.rb", | |
31 | + "generators/delayed_job/templates/migration.rb", | |
32 | + "generators/delayed_job/templates/script", | |
33 | + "init.rb", | |
34 | + "lib/delayed/backend/active_record.rb", | |
35 | + "lib/delayed/backend/base.rb", | |
36 | + "lib/delayed/backend/couch_rest.rb", | |
37 | + "lib/delayed/backend/data_mapper.rb", | |
38 | + "lib/delayed/backend/mongo_mapper.rb", | |
39 | + "lib/delayed/command.rb", | |
40 | + "lib/delayed/message_sending.rb", | |
41 | + "lib/delayed/performable_method.rb", | |
42 | + "lib/delayed/railtie.rb", | |
43 | + "lib/delayed/recipes.rb", | |
44 | + "lib/delayed/tasks.rb", | |
45 | + "lib/delayed/worker.rb", | |
46 | + "lib/delayed/yaml_ext.rb", | |
47 | + "lib/delayed_job.rb", | |
48 | + "lib/generators/delayed_job/delayed_job_generator.rb", | |
49 | + "lib/generators/delayed_job/templates/migration.rb", | |
50 | + "lib/generators/delayed_job/templates/script", | |
51 | + "rails/init.rb", | |
52 | + "recipes/delayed_job.rb", | |
53 | + "spec/autoloaded/clazz.rb", | |
54 | + "spec/autoloaded/struct.rb", | |
55 | + "spec/backend/active_record_job_spec.rb", | |
56 | + "spec/backend/couch_rest_job_spec.rb", | |
57 | + "spec/backend/data_mapper_job_spec.rb", | |
58 | + "spec/backend/mongo_mapper_job_spec.rb", | |
59 | + "spec/backend/shared_backend_spec.rb", | |
60 | + "spec/message_sending_spec.rb", | |
61 | + "spec/performable_method_spec.rb", | |
62 | + "spec/sample_jobs.rb", | |
63 | + "spec/setup/active_record.rb", | |
64 | + "spec/setup/couch_rest.rb", | |
65 | + "spec/setup/data_mapper.rb", | |
66 | + "spec/setup/mongo_mapper.rb", | |
67 | + "spec/spec_helper.rb", | |
68 | + "spec/worker_spec.rb", | |
69 | + "tasks/jobs.rake" | |
70 | + ] | |
71 | + s.homepage = %q{http://github.com/collectiveidea/delayed_job} | |
72 | + s.rdoc_options = ["--main", "README.textile", "--inline-source", "--line-numbers"] | |
73 | + s.require_paths = ["lib"] | |
74 | + s.rubygems_version = %q{1.3.6} | |
75 | + s.summary = %q{Database-backed asynchronous priority queue system -- Extracted from Shopify} | |
76 | + s.test_files = [ | |
77 | + "spec/message_sending_spec.rb", | |
78 | + "spec/performable_method_spec.rb", | |
79 | + "spec/worker_spec.rb" | |
80 | + ] | |
81 | + | |
82 | + if s.respond_to? :specification_version then | |
83 | + current_version = Gem::Specification::CURRENT_SPECIFICATION_VERSION | |
84 | + s.specification_version = 3 | |
85 | + | |
86 | + if Gem::Version.new(Gem::RubyGemsVersion) >= Gem::Version.new('1.2.0') then | |
87 | + s.add_runtime_dependency(%q<daemons>, [">= 0"]) | |
88 | + s.add_development_dependency(%q<rspec>, [">= 0"]) | |
89 | + s.add_development_dependency(%q<sqlite3-ruby>, [">= 0"]) | |
90 | + s.add_development_dependency(%q<activerecord>, [">= 0"]) | |
91 | + s.add_development_dependency(%q<mongo_mapper>, [">= 0"]) | |
92 | + s.add_development_dependency(%q<dm-core>, [">= 0"]) | |
93 | + s.add_development_dependency(%q<dm-observer>, [">= 0"]) | |
94 | + s.add_development_dependency(%q<dm-aggregates>, [">= 0"]) | |
95 | + s.add_development_dependency(%q<dm-validations>, [">= 0"]) | |
96 | + s.add_development_dependency(%q<do_sqlite3>, [">= 0"]) | |
97 | + s.add_development_dependency(%q<couchrest>, [">= 0"]) | |
98 | + else | |
99 | + s.add_dependency(%q<daemons>, [">= 0"]) | |
100 | + s.add_dependency(%q<rspec>, [">= 0"]) | |
101 | + s.add_dependency(%q<sqlite3-ruby>, [">= 0"]) | |
102 | + s.add_dependency(%q<activerecord>, [">= 0"]) | |
103 | + s.add_dependency(%q<mongo_mapper>, [">= 0"]) | |
104 | + s.add_dependency(%q<dm-core>, [">= 0"]) | |
105 | + s.add_dependency(%q<dm-observer>, [">= 0"]) | |
106 | + s.add_dependency(%q<dm-aggregates>, [">= 0"]) | |
107 | + s.add_dependency(%q<dm-validations>, [">= 0"]) | |
108 | + s.add_dependency(%q<do_sqlite3>, [">= 0"]) | |
109 | + s.add_dependency(%q<couchrest>, [">= 0"]) | |
110 | + end | |
111 | + else | |
112 | + s.add_dependency(%q<daemons>, [">= 0"]) | |
113 | + s.add_dependency(%q<rspec>, [">= 0"]) | |
114 | + s.add_dependency(%q<sqlite3-ruby>, [">= 0"]) | |
115 | + s.add_dependency(%q<activerecord>, [">= 0"]) | |
116 | + s.add_dependency(%q<mongo_mapper>, [">= 0"]) | |
117 | + s.add_dependency(%q<dm-core>, [">= 0"]) | |
118 | + s.add_dependency(%q<dm-observer>, [">= 0"]) | |
119 | + s.add_dependency(%q<dm-aggregates>, [">= 0"]) | |
120 | + s.add_dependency(%q<dm-validations>, [">= 0"]) | |
121 | + s.add_dependency(%q<do_sqlite3>, [">= 0"]) | |
122 | + s.add_dependency(%q<couchrest>, [">= 0"]) | |
123 | + end | |
124 | +end | |
125 | + | ... | ... |
vendor/plugins/delayed_job/generators/delayed_job/delayed_job_generator.rb
0 → 100644
... | ... | @@ -0,0 +1,22 @@ |
1 | +class DelayedJobGenerator < Rails::Generator::Base | |
2 | + default_options :skip_migration => false | |
3 | + | |
4 | + def manifest | |
5 | + record do |m| | |
6 | + m.template 'script', 'script/delayed_job', :chmod => 0755 | |
7 | + if !options[:skip_migration] && defined?(ActiveRecord) | |
8 | + m.migration_template "migration.rb", 'db/migrate', | |
9 | + :migration_file_name => "create_delayed_jobs" | |
10 | + end | |
11 | + end | |
12 | + end | |
13 | + | |
14 | +protected | |
15 | + | |
16 | + def add_options!(opt) | |
17 | + opt.separator '' | |
18 | + opt.separator 'Options:' | |
19 | + opt.on("--skip-migration", "Don't generate a migration") { |v| options[:skip_migration] = v } | |
20 | + end | |
21 | + | |
22 | +end | ... | ... |
vendor/plugins/delayed_job/generators/delayed_job/templates/migration.rb
0 → 100644
... | ... | @@ -0,0 +1,21 @@ |
1 | +class CreateDelayedJobs < ActiveRecord::Migration | |
2 | + def self.up | |
3 | + create_table :delayed_jobs, :force => true do |table| | |
4 | + table.integer :priority, :default => 0 # Allows some jobs to jump to the front of the queue | |
5 | + table.integer :attempts, :default => 0 # Provides for retries, but still fail eventually. | |
6 | + table.text :handler # YAML-encoded string of the object that will do work | |
7 | + table.text :last_error # reason for last failure (See Note below) | |
8 | + table.datetime :run_at # When to run. Could be Time.zone.now for immediately, or sometime in the future. | |
9 | + table.datetime :locked_at # Set when a client is working on this object | |
10 | + table.datetime :failed_at # Set when all retries have failed (actually, by default, the record is deleted instead) | |
11 | + table.string :locked_by # Who is working on this object (if locked) | |
12 | + table.timestamps | |
13 | + end | |
14 | + | |
15 | + add_index :delayed_jobs, [:priority, :run_at], :name => 'delayed_jobs_priority' | |
16 | + end | |
17 | + | |
18 | + def self.down | |
19 | + drop_table :delayed_jobs | |
20 | + end | |
21 | +end | |
0 | 22 | \ No newline at end of file | ... | ... |
vendor/plugins/delayed_job/lib/delayed/backend/active_record.rb
0 → 100644
... | ... | @@ -0,0 +1,98 @@ |
1 | +require 'active_record' | |
2 | +require 'active_record/version' | |
3 | + | |
4 | +class ActiveRecord::Base | |
5 | + yaml_as "tag:ruby.yaml.org,2002:ActiveRecord" | |
6 | + | |
7 | + def self.yaml_new(klass, tag, val) | |
8 | + klass.find(val['attributes']['id']) | |
9 | + rescue ActiveRecord::RecordNotFound | |
10 | + nil | |
11 | + end | |
12 | + | |
13 | + def to_yaml_properties | |
14 | + ['@attributes'] | |
15 | + end | |
16 | +end | |
17 | + | |
18 | +module Delayed | |
19 | + module Backend | |
20 | + module ActiveRecord | |
21 | + # A job object that is persisted to the database. | |
22 | + # Contains the work object as a YAML field. | |
23 | + class Job < ::ActiveRecord::Base | |
24 | + include Delayed::Backend::Base | |
25 | + set_table_name :delayed_jobs | |
26 | + | |
27 | + before_save :set_default_run_at | |
28 | + | |
29 | + if ::ActiveRecord::VERSION::MAJOR >= 3 | |
30 | + scope :ready_to_run, lambda {|worker_name, max_run_time| | |
31 | + where(['(run_at <= ? AND (locked_at IS NULL OR locked_at < ?) OR locked_by = ?) AND failed_at IS NULL', db_time_now, db_time_now - max_run_time, worker_name]) | |
32 | + } | |
33 | + scope :by_priority, order('priority ASC, run_at ASC') | |
34 | + else | |
35 | + named_scope :ready_to_run, lambda {|worker_name, max_run_time| | |
36 | + {:conditions => ['(run_at <= ? AND (locked_at IS NULL OR locked_at < ?) OR locked_by = ?) AND failed_at IS NULL', db_time_now, db_time_now - max_run_time, worker_name]} | |
37 | + } | |
38 | + named_scope :by_priority, :order => 'priority ASC, run_at ASC' | |
39 | + end | |
40 | + | |
41 | + def self.after_fork | |
42 | + ::ActiveRecord::Base.connection.reconnect! | |
43 | + end | |
44 | + | |
45 | + # When a worker is exiting, make sure we don't have any locked jobs. | |
46 | + def self.clear_locks!(worker_name) | |
47 | + update_all("locked_by = null, locked_at = null", ["locked_by = ?", worker_name]) | |
48 | + end | |
49 | + | |
50 | + # Find a few candidate jobs to run (in case some immediately get locked by others). | |
51 | + def self.find_available(worker_name, limit = 5, max_run_time = Worker.max_run_time) | |
52 | + scope = self.ready_to_run(worker_name, max_run_time) | |
53 | + scope = scope.scoped(:conditions => ['priority >= ?', Worker.min_priority]) if Worker.min_priority | |
54 | + scope = scope.scoped(:conditions => ['priority <= ?', Worker.max_priority]) if Worker.max_priority | |
55 | + | |
56 | + ::ActiveRecord::Base.silence do | |
57 | + scope.by_priority.all(:limit => limit) | |
58 | + end | |
59 | + end | |
60 | + | |
61 | + # Lock this job for this worker. | |
62 | + # Returns true if we have the lock, false otherwise. | |
63 | + def lock_exclusively!(max_run_time, worker) | |
64 | + now = self.class.db_time_now | |
65 | + affected_rows = if locked_by != worker | |
66 | + # We don't own this job so we will update the locked_by name and the locked_at | |
67 | + self.class.update_all(["locked_at = ?, locked_by = ?", now, worker], ["id = ? and (locked_at is null or locked_at < ?) and (run_at <= ?)", id, (now - max_run_time.to_i), now]) | |
68 | + else | |
69 | + # We already own this job, this may happen if the job queue crashes. | |
70 | + # Simply resume and update the locked_at | |
71 | + self.class.update_all(["locked_at = ?", now], ["id = ? and locked_by = ?", id, worker]) | |
72 | + end | |
73 | + if affected_rows == 1 | |
74 | + self.locked_at = now | |
75 | + self.locked_by = worker | |
76 | + return true | |
77 | + else | |
78 | + return false | |
79 | + end | |
80 | + end | |
81 | + | |
82 | + # Get the current time (GMT or local depending on DB) | |
83 | + # Note: This does not ping the DB to get the time, so all your clients | |
84 | + # must have synchronized clocks. | |
85 | + def self.db_time_now | |
86 | + if Time.zone | |
87 | + Time.zone.now | |
88 | + elsif ::ActiveRecord::Base.default_timezone == :utc | |
89 | + Time.now.utc | |
90 | + else | |
91 | + Time.now | |
92 | + end | |
93 | + end | |
94 | + | |
95 | + end | |
96 | + end | |
97 | + end | |
98 | +end | ... | ... |
... | ... | @@ -0,0 +1,82 @@ |
1 | +module Delayed | |
2 | + module Backend | |
3 | + class DeserializationError < StandardError | |
4 | + end | |
5 | + | |
6 | + module Base | |
7 | + def self.included(base) | |
8 | + base.extend ClassMethods | |
9 | + end | |
10 | + | |
11 | + module ClassMethods | |
12 | + # Add a job to the queue | |
13 | + def enqueue(*args) | |
14 | + object = args.shift | |
15 | + unless object.respond_to?(:perform) | |
16 | + raise ArgumentError, 'Cannot enqueue items which do not respond to perform' | |
17 | + end | |
18 | + | |
19 | + priority = args.first || Delayed::Worker.default_priority | |
20 | + run_at = args[1] | |
21 | + self.create(:payload_object => object, :priority => priority.to_i, :run_at => run_at) | |
22 | + end | |
23 | + | |
24 | + # Hook method that is called before a new worker is forked | |
25 | + def before_fork | |
26 | + end | |
27 | + | |
28 | + # Hook method that is called after a new worker is forked | |
29 | + def after_fork | |
30 | + end | |
31 | + | |
32 | + def work_off(num = 100) | |
33 | + warn "[DEPRECATION] `Delayed::Job.work_off` is deprecated. Use `Delayed::Worker.new.work_off` instead." | |
34 | + Delayed::Worker.new.work_off(num) | |
35 | + end | |
36 | + end | |
37 | + | |
38 | + ParseObjectFromYaml = /\!ruby\/\w+\:([^\s]+)/ | |
39 | + | |
40 | + def failed? | |
41 | + failed_at | |
42 | + end | |
43 | + alias_method :failed, :failed? | |
44 | + | |
45 | + def name | |
46 | + @name ||= begin | |
47 | + payload = payload_object | |
48 | + payload.respond_to?(:display_name) ? payload.display_name : payload.class.name | |
49 | + end | |
50 | + end | |
51 | + | |
52 | + def payload_object=(object) | |
53 | + self.handler = object.to_yaml | |
54 | + end | |
55 | + | |
56 | + def payload_object | |
57 | + @payload_object ||= YAML.load(self.handler) | |
58 | + rescue TypeError, LoadError, NameError => e | |
59 | + raise DeserializationError, | |
60 | + "Job failed to load: #{e.message}. Try to manually require the required file. Handler: #{handler.inspect}" | |
61 | + end | |
62 | + | |
63 | + # Moved into its own method so that new_relic can trace it. | |
64 | + def invoke_job | |
65 | + payload_object.perform | |
66 | + end | |
67 | + | |
68 | + # Unlock this job (note: not saved to DB) | |
69 | + def unlock | |
70 | + self.locked_at = nil | |
71 | + self.locked_by = nil | |
72 | + end | |
73 | + | |
74 | + protected | |
75 | + | |
76 | + def set_default_run_at | |
77 | + self.run_at ||= self.class.db_time_now | |
78 | + end | |
79 | + | |
80 | + end | |
81 | + end | |
82 | +end | ... | ... |
vendor/plugins/delayed_job/lib/delayed/backend/couch_rest.rb
0 → 100644
... | ... | @@ -0,0 +1,109 @@ |
1 | +require 'couchrest' | |
2 | + | |
3 | +# extend CouchRest to handle delayed_job serialization. | |
4 | +class CouchRest::ExtendedDocument | |
5 | + yaml_as "tag:ruby.yaml.org,2002:CouchRest" | |
6 | + | |
7 | + def reload | |
8 | + job = self.class.get self['_id'] | |
9 | + job.each {|k,v| self[k] = v} | |
10 | + end | |
11 | + def self.find(id) | |
12 | + get id | |
13 | + end | |
14 | + def self.yaml_new(klass, tag, val) | |
15 | + klass.get(val['_id']) | |
16 | + end | |
17 | + def ==(other) | |
18 | + if other.is_a? ::CouchRest::ExtendedDocument | |
19 | + self['_id'] == other['_id'] | |
20 | + else | |
21 | + super | |
22 | + end | |
23 | + end | |
24 | +end | |
25 | + | |
26 | +#couchrest adapter | |
27 | +module Delayed | |
28 | + module Backend | |
29 | + module CouchRest | |
30 | + class Job < ::CouchRest::ExtendedDocument | |
31 | + include Delayed::Backend::Base | |
32 | + use_database ::CouchRest::Server.new.database('delayed_job') | |
33 | + | |
34 | + property :handler | |
35 | + property :last_error | |
36 | + property :locked_by | |
37 | + property :priority, :default => 0 | |
38 | + property :attempts, :default => 0 | |
39 | + property :run_at, :cast_as => 'Time' | |
40 | + property :locked_at, :cast_as => 'Time' | |
41 | + property :failed_at, :cast_as => 'Time' | |
42 | + timestamps! | |
43 | + | |
44 | + set_callback :save, :before, :set_default_run_at | |
45 | + | |
46 | + view_by(:failed_at, :locked_by, :run_at, | |
47 | + :map => "function(doc){" + | |
48 | + " if(doc['couchrest-type'] == 'Delayed::Backend::CouchRest::Job') {" + | |
49 | + " emit([doc.failed_at || null, doc.locked_by || null, doc.run_at || null], null);}" + | |
50 | + " }") | |
51 | + view_by(:failed_at, :locked_at, :run_at, | |
52 | + :map => "function(doc){" + | |
53 | + " if(doc['couchrest-type'] == 'Delayed::Backend::CouchRest::Job') {" + | |
54 | + " emit([doc.failed_at || null, doc.locked_at || null, doc.run_at || null], null);}" + | |
55 | + " }") | |
56 | + | |
57 | + def self.db_time_now; Time.now; end | |
58 | + def self.find_available(worker_name, limit = 5, max_run_time = ::Delayed::Worker.max_run_time) | |
59 | + ready = ready_jobs | |
60 | + mine = my_jobs worker_name | |
61 | + expire = expired_jobs max_run_time | |
62 | + jobs = (ready + mine + expire)[0..limit-1].sort_by { |j| j.priority } | |
63 | + jobs = jobs.find_all { |j| j.priority >= Worker.min_priority } if Worker.min_priority | |
64 | + jobs = jobs.find_all { |j| j.priority <= Worker.max_priority } if Worker.max_priority | |
65 | + jobs | |
66 | + end | |
67 | + def self.clear_locks!(worker_name) | |
68 | + jobs = my_jobs worker_name | |
69 | + jobs.each { |j| j.locked_by, j.locked_at = nil, nil; } | |
70 | + database.bulk_save jobs | |
71 | + end | |
72 | + def self.delete_all | |
73 | + database.bulk_save all.each { |doc| doc['_deleted'] = true } | |
74 | + end | |
75 | + | |
76 | + def lock_exclusively!(max_run_time, worker = worker_name) | |
77 | + return false if locked_by_other?(worker) and not expired?(max_run_time) | |
78 | + case | |
79 | + when locked_by_me?(worker) | |
80 | + self.locked_at = self.class.db_time_now | |
81 | + when (unlocked? or (locked_by_other?(worker) and expired?(max_run_time))) | |
82 | + self.locked_at, self.locked_by = self.class.db_time_now, worker | |
83 | + end | |
84 | + save | |
85 | + rescue RestClient::Conflict | |
86 | + false | |
87 | + end | |
88 | + | |
89 | + private | |
90 | + def self.ready_jobs | |
91 | + options = {:startkey => [nil, nil], :endkey => [nil, nil, db_time_now]} | |
92 | + by_failed_at_and_locked_by_and_run_at options | |
93 | + end | |
94 | + def self.my_jobs(worker_name) | |
95 | + options = {:startkey => [nil, worker_name], :endkey => [nil, worker_name, {}]} | |
96 | + by_failed_at_and_locked_by_and_run_at options | |
97 | + end | |
98 | + def self.expired_jobs(max_run_time) | |
99 | + options = {:startkey => [nil,'0'], :endkey => [nil, db_time_now - max_run_time, db_time_now]} | |
100 | + by_failed_at_and_locked_at_and_run_at options | |
101 | + end | |
102 | + def unlocked?; locked_by.nil?; end | |
103 | + def expired?(time); locked_at < self.class.db_time_now - time; end | |
104 | + def locked_by_me?(worker); not locked_by.nil? and locked_by == worker; end | |
105 | + def locked_by_other?(worker); not locked_by.nil? and locked_by != worker; end | |
106 | + end | |
107 | + end | |
108 | + end | |
109 | +end | ... | ... |
vendor/plugins/delayed_job/lib/delayed/backend/data_mapper.rb
0 → 100644
... | ... | @@ -0,0 +1,121 @@ |
1 | +require 'dm-core' | |
2 | +require 'dm-observer' | |
3 | +require 'dm-aggregates' | |
4 | + | |
5 | +DataMapper::Resource.class_eval do | |
6 | + yaml_as "tag:ruby.yaml.org,2002:DataMapper" | |
7 | + | |
8 | + def self.yaml_new(klass, tag, val) | |
9 | + klass.find(val['id']) | |
10 | + end | |
11 | + | |
12 | + def to_yaml_properties | |
13 | + ['@id'] | |
14 | + end | |
15 | +end | |
16 | + | |
17 | +module Delayed | |
18 | + module Backend | |
19 | + module DataMapper | |
20 | + class Job | |
21 | + include ::DataMapper::Resource | |
22 | + include Delayed::Backend::Base | |
23 | + | |
24 | + storage_names[:default] = 'delayed_jobs' | |
25 | + | |
26 | + property :id, Serial | |
27 | + property :priority, Integer, :default => 0, :index => :run_at_priority | |
28 | + property :attempts, Integer, :default => 0 | |
29 | + property :handler, Text, :lazy => false | |
30 | + property :run_at, Time, :index => :run_at_priority | |
31 | + property :locked_at, Time, :index => true | |
32 | + property :locked_by, String | |
33 | + property :failed_at, Time | |
34 | + property :last_error, Text | |
35 | + | |
36 | + def self.db_time_now | |
37 | + Time.now | |
38 | + end | |
39 | + | |
40 | + def self.find_available(worker_name, limit = 5, max_run_time = Worker.max_run_time) | |
41 | + | |
42 | + simple_conditions = { :run_at.lte => db_time_now, :limit => limit, :failed_at => nil, :order => [:priority.asc, :run_at.asc] } | |
43 | + | |
44 | + # respect priorities | |
45 | + simple_conditions[:priority.gte] = Worker.min_priority if Worker.min_priority | |
46 | + simple_conditions[:priority.lte] = Worker.max_priority if Worker.max_priority | |
47 | + | |
48 | + # lockable | |
49 | + lockable = ( | |
50 | + # not locked or past the max time | |
51 | + ( all(:locked_at => nil ) | all(:locked_at.lt => db_time_now - max_run_time)) | | |
52 | + | |
53 | + # OR locked by our worker | |
54 | + all(:locked_by => worker_name)) | |
55 | + | |
56 | + # plus some other boring junk | |
57 | + (lockable).all( simple_conditions ) | |
58 | + end | |
59 | + | |
60 | + # When a worker is exiting, make sure we don't have any locked jobs. | |
61 | + def self.clear_locks!(worker_name) | |
62 | + all(:locked_by => worker_name).update(:locked_at => nil, :locked_by => nil) | |
63 | + end | |
64 | + | |
65 | + # Lock this job for this worker. | |
66 | + # Returns true if we have the lock, false otherwise. | |
67 | + def lock_exclusively!(max_run_time, worker = worker_name) | |
68 | + | |
69 | + now = self.class.db_time_now | |
70 | + overtime = now - max_run_time | |
71 | + | |
72 | + # FIXME - this is a bit gross | |
73 | + # DM doesn't give us the number of rows affected by a collection update | |
74 | + # so we have to circumvent some niceness in DM::Collection here | |
75 | + collection = locked_by != worker ? | |
76 | + (self.class.all(:id => id, :run_at.lte => now) & ( self.class.all(:locked_at => nil) | self.class.all(:locked_at.lt => overtime) ) ) : | |
77 | + self.class.all(:id => id, :locked_by => worker) | |
78 | + | |
79 | + attributes = collection.model.new(:locked_at => now, :locked_by => worker).dirty_attributes | |
80 | + affected_rows = self.repository.update(attributes, collection) | |
81 | + | |
82 | + if affected_rows == 1 | |
83 | + self.locked_at = now | |
84 | + self.locked_by = worker | |
85 | + return true | |
86 | + else | |
87 | + return false | |
88 | + end | |
89 | + end | |
90 | + | |
91 | + # these are common to the other backends, so we provide an implementation | |
92 | + def self.delete_all | |
93 | + Delayed::Job.auto_migrate! | |
94 | + end | |
95 | + | |
96 | + def self.find id | |
97 | + get id | |
98 | + end | |
99 | + | |
100 | + def update_attributes(attributes) | |
101 | + attributes.each do |k,v| | |
102 | + self[k] = v | |
103 | + end | |
104 | + self.save | |
105 | + end | |
106 | + | |
107 | + | |
108 | + end | |
109 | + | |
110 | + class JobObserver | |
111 | + include ::DataMapper::Observer | |
112 | + | |
113 | + observe Job | |
114 | + | |
115 | + before :save do | |
116 | + self.run_at ||= self.class.db_time_now | |
117 | + end | |
118 | + end | |
119 | + end | |
120 | + end | |
121 | +end | ... | ... |
vendor/plugins/delayed_job/lib/delayed/backend/mongo_mapper.rb
0 → 100644
... | ... | @@ -0,0 +1,106 @@ |
1 | +require 'mongo_mapper' | |
2 | + | |
3 | +MongoMapper::Document.class_eval do | |
4 | + yaml_as "tag:ruby.yaml.org,2002:MongoMapper" | |
5 | + | |
6 | + def self.yaml_new(klass, tag, val) | |
7 | + klass.find(val['_id']) | |
8 | + end | |
9 | + | |
10 | + def to_yaml_properties | |
11 | + ['@_id'] | |
12 | + end | |
13 | +end | |
14 | + | |
15 | +module Delayed | |
16 | + module Backend | |
17 | + module MongoMapper | |
18 | + class Job | |
19 | + include ::MongoMapper::Document | |
20 | + include Delayed::Backend::Base | |
21 | + set_collection_name 'delayed_jobs' | |
22 | + | |
23 | + key :priority, Integer, :default => 0 | |
24 | + key :attempts, Integer, :default => 0 | |
25 | + key :handler, String | |
26 | + key :run_at, Time | |
27 | + key :locked_at, Time | |
28 | + key :locked_by, String, :index => true | |
29 | + key :failed_at, Time | |
30 | + key :last_error, String | |
31 | + timestamps! | |
32 | + | |
33 | + before_save :set_default_run_at | |
34 | + | |
35 | + ensure_index [[:priority, 1], [:run_at, 1]] | |
36 | + | |
37 | + def self.before_fork | |
38 | + ::MongoMapper.connection.close | |
39 | + end | |
40 | + | |
41 | + def self.after_fork | |
42 | + ::MongoMapper.connect(RAILS_ENV) | |
43 | + end | |
44 | + | |
45 | + def self.db_time_now | |
46 | + Time.now.utc | |
47 | + end | |
48 | + | |
49 | + def self.find_available(worker_name, limit = 5, max_run_time = Worker.max_run_time) | |
50 | + right_now = db_time_now | |
51 | + | |
52 | + conditions = { | |
53 | + :run_at => {"$lte" => right_now}, | |
54 | + :limit => -limit, # In mongo, positive limits are 'soft' and negative are 'hard' | |
55 | + :failed_at => nil, | |
56 | + :sort => [['priority', 1], ['run_at', 1]] | |
57 | + } | |
58 | + | |
59 | + where = "this.locked_at == null || this.locked_at < #{make_date(right_now - max_run_time)}" | |
60 | + | |
61 | + (conditions[:priority] ||= {})['$gte'] = Worker.min_priority.to_i if Worker.min_priority | |
62 | + (conditions[:priority] ||= {})['$lte'] = Worker.max_priority.to_i if Worker.max_priority | |
63 | + | |
64 | + results = all(conditions.merge(:locked_by => worker_name)) | |
65 | + results += all(conditions.merge('$where' => where)) if results.size < limit | |
66 | + results | |
67 | + end | |
68 | + | |
69 | + # When a worker is exiting, make sure we don't have any locked jobs. | |
70 | + def self.clear_locks!(worker_name) | |
71 | + collection.update({:locked_by => worker_name}, {"$set" => {:locked_at => nil, :locked_by => nil}}, :multi => true) | |
72 | + end | |
73 | + | |
74 | + # Lock this job for this worker. | |
75 | + # Returns true if we have the lock, false otherwise. | |
76 | + def lock_exclusively!(max_run_time, worker = worker_name) | |
77 | + right_now = self.class.db_time_now | |
78 | + overtime = right_now - max_run_time.to_i | |
79 | + | |
80 | + query = "this.locked_at == null || this.locked_at < #{make_date(overtime)} || this.locked_by == #{worker.to_json}" | |
81 | + conditions = {:_id => id, :run_at => {"$lte" => right_now}, "$where" => query} | |
82 | + | |
83 | + collection.update(conditions, {"$set" => {:locked_at => right_now, :locked_by => worker}}) | |
84 | + affected_rows = collection.find({:_id => id, :locked_by => worker}).count | |
85 | + if affected_rows == 1 | |
86 | + self.locked_at = right_now | |
87 | + self.locked_by = worker | |
88 | + return true | |
89 | + else | |
90 | + return false | |
91 | + end | |
92 | + end | |
93 | + | |
94 | + private | |
95 | + | |
96 | + def self.make_date(date_or_seconds) | |
97 | + "new Date(#{date_or_seconds.to_f * 1000})" | |
98 | + end | |
99 | + | |
100 | + def make_date(date) | |
101 | + self.class.make_date(date) | |
102 | + end | |
103 | + end | |
104 | + end | |
105 | + end | |
106 | +end | ... | ... |
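The MongoMapper backend above is only used when it is selected for the worker, either explicitly or via Delayed::Worker.guess_backend (which prefers ActiveRecord when both ORMs are loaded; see worker.rb later in this diff). A minimal sketch of switching backends, assuming MongoMapper is already configured for the application:

    # Loads delayed/backend/mongo_mapper and points the Delayed::Job constant
    # at Delayed::Backend::MongoMapper::Job (done by Delayed::Worker.backend=).
    Delayed::Worker.backend = :mongo_mapper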
... | ... | @@ -0,0 +1,107 @@ |
1 | +require 'rubygems' | |
2 | +require 'daemons' | |
3 | +require 'optparse' | |
4 | + | |
5 | +module Delayed | |
6 | + class Command | |
7 | + attr_accessor :worker_count | |
8 | + | |
9 | + def initialize(args) | |
10 | + @files_to_reopen = [] | |
11 | + @options = { | |
12 | + :quiet => true, | |
13 | + :pid_dir => "#{Rails.root}/tmp/pids" | |
14 | + } | |
15 | + | |
16 | + @worker_count = 1 | |
17 | + @monitor = false | |
18 | + | |
19 | + opts = OptionParser.new do |opts| | |
20 | + opts.banner = "Usage: #{File.basename($0)} [options] start|stop|restart|run" | |
21 | + | |
22 | + opts.on('-h', '--help', 'Show this message') do | |
23 | + puts opts | |
24 | + exit 1 | |
25 | + end | |
26 | + opts.on('-e', '--environment=NAME', 'Specifies the environment to run these delayed jobs under (test/development/production).') do |e| | 
27 | + STDERR.puts "The -e/--environment option has been deprecated and has no effect. Use RAILS_ENV and see http://github.com/collectiveidea/delayed_job/issues/#issue/7" | |
28 | + end | |
29 | + opts.on('--min-priority N', 'Minimum priority of jobs to run.') do |n| | |
30 | + @options[:min_priority] = n | |
31 | + end | |
32 | + opts.on('--max-priority N', 'Maximum priority of jobs to run.') do |n| | |
33 | + @options[:max_priority] = n | |
34 | + end | |
35 | + opts.on('-n', '--number_of_workers=workers', "Number of unique workers to spawn") do |worker_count| | |
36 | + @worker_count = worker_count.to_i rescue 1 | |
37 | + end | |
38 | + opts.on('--pid-dir=DIR', 'Specifies an alternate directory in which to store the process ids.') do |dir| | |
39 | + @options[:pid_dir] = dir | |
40 | + end | |
41 | + opts.on('-i', '--identifier=n', 'A numeric identifier for the worker.') do |n| | |
42 | + @options[:identifier] = n | |
43 | + end | |
44 | + opts.on('-m', '--monitor', 'Start monitor process.') do | |
45 | + @monitor = true | |
46 | + end | |
47 | + | |
48 | + | |
49 | + end | |
50 | + @args = opts.parse!(args) | |
51 | + end | |
52 | + | |
53 | + def daemonize | |
54 | + Delayed::Worker.backend.before_fork | |
55 | + | |
56 | + ObjectSpace.each_object(File) do |file| | |
57 | + @files_to_reopen << file unless file.closed? | |
58 | + end | |
59 | + | |
60 | + dir = @options[:pid_dir] | |
61 | + Dir.mkdir(dir) unless File.exists?(dir) | |
62 | + | |
63 | + if @worker_count > 1 && @options[:identifier] | |
64 | + raise ArgumentError, 'Cannot specify both --number_of_workers and --identifier' | 
65 | + elsif @worker_count == 1 && @options[:identifier] | |
66 | + process_name = "delayed_job.#{@options[:identifier]}" | |
67 | + run_process(process_name, dir) | |
68 | + else | |
69 | + worker_count.times do |worker_index| | |
70 | + process_name = worker_count == 1 ? "delayed_job" : "delayed_job.#{worker_index}" | |
71 | + run_process(process_name, dir) | |
72 | + end | |
73 | + end | |
74 | + end | |
75 | + | |
76 | + def run_process(process_name, dir) | |
77 | + Daemons.run_proc(process_name, :dir => dir, :dir_mode => :normal, :monitor => @monitor, :ARGV => @args) do |*args| | |
78 | + run process_name | |
79 | + end | |
80 | + end | |
81 | + | |
82 | + def run(worker_name = nil) | |
83 | + Dir.chdir(Rails.root) | |
84 | + | |
85 | + # Re-open file handles | |
86 | + @files_to_reopen.each do |file| | |
87 | + begin | |
88 | + file.reopen file.path, "a+" | |
89 | + file.sync = true | |
90 | + rescue ::Exception | |
91 | + end | |
92 | + end | |
93 | + | |
94 | + Delayed::Worker.logger = Logger.new(File.join(Rails.root, 'log', 'delayed_job.log')) | |
95 | + Delayed::Worker.backend.after_fork | |
96 | + | |
97 | + worker = Delayed::Worker.new(@options) | |
98 | + worker.name_prefix = "#{worker_name} " | |
99 | + worker.start | |
100 | + rescue => e | |
101 | + Rails.logger.fatal e | |
102 | + STDERR.puts e.message | |
103 | + exit 1 | |
104 | + end | |
105 | + | |
106 | + end | |
107 | +end | ... | ... |
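Delayed::Command is driven by a small script/delayed_job wrapper; the generator further down in this diff installs one from a template that is not shown here. A minimal sketch of what such a wrapper usually contains, assuming the standard Rails plugin layout:

    #!/usr/bin/env ruby

    # Boot the Rails environment, then hand ARGV (start|stop|restart|run plus the
    # options parsed above) to Delayed::Command, which daemonizes the workers.
    require File.expand_path(File.join(File.dirname(__FILE__), '..', 'config', 'environment'))
    require 'delayed/command'

    Delayed::Command.new(ARGV).daemonize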
vendor/plugins/delayed_job/lib/delayed/message_sending.rb
0 → 100644
... | ... | @@ -0,0 +1,45 @@ |
1 | +require 'active_support/basic_object' | |
2 | + | |
3 | +module Delayed | |
4 | + class DelayProxy < ActiveSupport::BasicObject | |
5 | + def initialize(target, options) | |
6 | + @target = target | |
7 | + @options = options | |
8 | + end | |
9 | + | |
10 | + def method_missing(method, *args) | |
11 | + Job.create({ | |
12 | + :payload_object => PerformableMethod.new(@target, method.to_sym, args), | |
13 | + :priority => ::Delayed::Worker.default_priority | |
14 | + }.merge(@options)) | |
15 | + end | |
16 | + end | |
17 | + | |
18 | + module MessageSending | |
19 | + def delay(options = {}) | |
20 | + DelayProxy.new(self, options) | |
21 | + end | |
22 | + alias __delay__ delay | |
23 | + | |
24 | + def send_later(method, *args) | |
25 | + warn "[DEPRECATION] `object.send_later(:method)` is deprecated. Use `object.delay.method" | |
26 | + __delay__.__send__(method, *args) | |
27 | + end | |
28 | + | |
29 | + def send_at(time, method, *args) | |
30 | + warn "[DEPRECATION] `object.send_at(time, :method)` is deprecated. Use `object.delay(:run_at => time).method" | |
31 | + __delay__(:run_at => time).__send__(method, *args) | |
32 | + end | |
33 | + | |
34 | + module ClassMethods | |
35 | + def handle_asynchronously(method) | |
36 | + aliased_method, punctuation = method.to_s.sub(/([?!=])$/, ''), $1 | |
37 | + with_method, without_method = "#{aliased_method}_with_delay#{punctuation}", "#{aliased_method}_without_delay#{punctuation}" | |
38 | + define_method(with_method) do |*args| | |
39 | + delay.__send__(without_method, *args) | |
40 | + end | |
41 | + alias_method_chain method, :delay | |
42 | + end | |
43 | + end | |
44 | + end | |
45 | +end | ... | ... |
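These mixins give every object a #delay proxy and let a class declare a method asynchronous with handle_asynchronously. A short usage sketch; Newsletter and deliver are made-up names, not part of this commit:

    class Newsletter
      def deliver(address)
        # ... send the mail inline ...
      end
      # Calls to #deliver now enqueue a Delayed::Job instead of running inline.
      handle_asynchronously :deliver
    end

    # The proxy form enqueues a single call without touching the class definition:
    'hello'.delay.upcase
    'hello'.delay(:priority => 10, :run_at => 1.hour.from_now).upcase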
vendor/plugins/delayed_job/lib/delayed/performable_method.rb
0 → 100644
... | ... | @@ -0,0 +1,27 @@ |
1 | +module Delayed | |
2 | + class PerformableMethod < Struct.new(:object, :method, :args) | |
3 | + def initialize(object, method, args) | |
4 | + raise NoMethodError, "undefined method `#{method}' for #{object.inspect}" unless object.respond_to?(method, true) | |
5 | + | |
6 | + self.object = object | |
7 | + self.args = args | |
8 | + self.method = method.to_sym | |
9 | + end | |
10 | + | |
11 | + def display_name | |
12 | + "#{object.class}##{method}" | |
13 | + end | |
14 | + | |
15 | + def perform | |
16 | + object.send(method, *args) if object | |
17 | + end | |
18 | + | |
19 | + def method_missing(symbol, *args) | |
20 | + object.respond_to?(symbol) ? object.send(symbol, *args) : super | |
21 | + end | |
22 | + | |
23 | + def respond_to?(symbol, include_private=false) | |
24 | + object.respond_to?(symbol, include_private) || super | |
25 | + end | |
26 | + end | |
27 | +end | ... | ... |
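PerformableMethod is the payload object that #delay serializes into a job's handler column. A small standalone illustration of its behaviour:

    pm = Delayed::PerformableMethod.new('hello world', :length, [])
    pm.display_name   # => "String#length"
    pm.perform        # => 11

    # Raises NoMethodError immediately, before anything is enqueued:
    Delayed::PerformableMethod.new(Object.new, :no_such_method, [])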
... | ... | @@ -0,0 +1,31 @@ |
1 | +# Capistrano Recipes for managing delayed_job | |
2 | +# | |
3 | +# Add these callbacks to have the delayed_job process restart when the server | |
4 | +# is restarted: | |
5 | +# | |
6 | +# after "deploy:stop", "delayed_job:stop" | |
7 | +# after "deploy:start", "delayed_job:start" | |
8 | +# after "deploy:restart", "delayed_job:restart" | |
9 | + | |
10 | +Capistrano::Configuration.instance.load do | |
11 | + namespace :delayed_job do | |
12 | + def rails_env | |
13 | + fetch(:rails_env, false) ? "RAILS_ENV=#{fetch(:rails_env)}" : '' | |
14 | + end | |
15 | + | |
16 | + desc "Stop the delayed_job process" | |
17 | + task :stop, :roles => :app do | |
18 | + run "cd #{current_path};#{rails_env} script/delayed_job stop" | |
19 | + end | |
20 | + | |
21 | + desc "Start the delayed_job process" | |
22 | + task :start, :roles => :app do | |
23 | + run "cd #{current_path};#{rails_env} script/delayed_job start" | |
24 | + end | |
25 | + | |
26 | + desc "Restart the delayed_job process" | |
27 | + task :restart, :roles => :app do | |
28 | + run "cd #{current_path};#{rails_env} script/delayed_job restart" | |
29 | + end | |
30 | + end | |
31 | +end | |
0 | 32 | \ No newline at end of file | ... | ... |
... | ... | @@ -0,0 +1,15 @@ |
1 | +# Re-definitions are appended to existing tasks | |
2 | +task :environment | |
3 | +task :merb_env | |
4 | + | |
5 | +namespace :jobs do | |
6 | + desc "Clear the delayed_job queue." | |
7 | + task :clear => [:merb_env, :environment] do | |
8 | + Delayed::Job.delete_all | |
9 | + end | |
10 | + | |
11 | + desc "Start a delayed_job worker." | |
12 | + task :work => [:merb_env, :environment] do | |
13 | + Delayed::Worker.new(:min_priority => ENV['MIN_PRIORITY'], :max_priority => ENV['MAX_PRIORITY']).start | |
14 | + end | |
15 | +end | ... | ... |
... | ... | @@ -0,0 +1,191 @@ |
1 | +require 'timeout' | |
2 | +require 'active_support/core_ext/numeric/time' | |
3 | +require 'active_support/core_ext/class/attribute_accessors' | |
4 | +require 'active_support/core_ext/kernel' | |
5 | + | |
6 | +module Delayed | |
7 | + class Worker | |
8 | + cattr_accessor :min_priority, :max_priority, :max_attempts, :max_run_time, :default_priority, :sleep_delay, :logger | |
9 | + self.sleep_delay = 5 | |
10 | + self.max_attempts = 25 | |
11 | + self.max_run_time = 4.hours | |
12 | + self.default_priority = 0 | |
13 | + | |
14 | + # By default failed jobs are destroyed after too many attempts. If you want to keep them around | |
15 | + # (perhaps to inspect the reason for the failure), set this to false. | |
16 | + cattr_accessor :destroy_failed_jobs | |
17 | + self.destroy_failed_jobs = true | |
18 | + | |
19 | + self.logger = if defined?(Merb::Logger) | |
20 | + Merb.logger | |
21 | + elsif defined?(RAILS_DEFAULT_LOGGER) | |
22 | + RAILS_DEFAULT_LOGGER | |
23 | + end | |
24 | + | |
25 | + # name_prefix is ignored if name is set directly | |
26 | + attr_accessor :name_prefix | |
27 | + | |
28 | + cattr_reader :backend | |
29 | + | |
30 | + def self.backend=(backend) | |
31 | + if backend.is_a? Symbol | |
32 | + require "delayed/backend/#{backend}" | |
33 | + backend = "Delayed::Backend::#{backend.to_s.classify}::Job".constantize | |
34 | + end | |
35 | + @@backend = backend | |
36 | + silence_warnings { ::Delayed.const_set(:Job, backend) } | |
37 | + end | |
38 | + | |
39 | + def self.guess_backend | |
40 | + self.backend ||= if defined?(ActiveRecord) | |
41 | + :active_record | |
42 | + elsif defined?(MongoMapper) | |
43 | + :mongo_mapper | |
44 | + else | |
45 | + logger.warn "Could not decide on a backend, defaulting to active_record" | |
46 | + :active_record | |
47 | + end | |
48 | + end | |
49 | + | |
50 | + def initialize(options={}) | |
51 | + @quiet = options[:quiet] | |
52 | + self.class.min_priority = options[:min_priority] if options.has_key?(:min_priority) | |
53 | + self.class.max_priority = options[:max_priority] if options.has_key?(:max_priority) | |
54 | + end | |
55 | + | |
56 | + # Every worker has a unique name which by default is the pid of the process. There are some | |
57 | + # advantages to overriding this with something which survives worker restarts: Workers can | 
58 | + # safely resume working on tasks which are locked by themselves. The worker will assume that | |
59 | + # it crashed before. | |
60 | + def name | |
61 | + return @name unless @name.nil? | |
62 | + "#{@name_prefix}host:#{Socket.gethostname} pid:#{Process.pid}" rescue "#{@name_prefix}pid:#{Process.pid}" | |
63 | + end | |
64 | + | |
65 | + # Sets the name of the worker. | |
66 | + # Setting the name to nil will reset the default worker name | |
67 | + def name=(val) | |
68 | + @name = val | |
69 | + end | |
70 | + | |
71 | + def start | |
72 | + say "Starting job worker" | |
73 | + | |
74 | + trap('TERM') { say 'Exiting...'; $exit = true } | |
75 | + trap('INT') { say 'Exiting...'; $exit = true } | |
76 | + | |
77 | + loop do | |
78 | + result = nil | |
79 | + | |
80 | + realtime = Benchmark.realtime do | |
81 | + result = work_off | |
82 | + end | |
83 | + | |
84 | + count = result.sum | |
85 | + | |
86 | + break if $exit | |
87 | + | |
88 | + if count.zero? | |
89 | + sleep(@@sleep_delay) | |
90 | + else | |
91 | + say "#{count} jobs processed at %.4f j/s, %d failed ..." % [count / realtime, result.last] | |
92 | + end | |
93 | + | |
94 | + break if $exit | |
95 | + end | |
96 | + | |
97 | + ensure | |
98 | + Delayed::Job.clear_locks!(name) | |
99 | + end | |
100 | + | |
101 | + # Do num jobs and return stats on success/failure. | |
102 | + # Exit early if interrupted. | |
103 | + def work_off(num = 100) | |
104 | + success, failure = 0, 0 | |
105 | + | |
106 | + num.times do | |
107 | + case reserve_and_run_one_job | |
108 | + when true | |
109 | + success += 1 | |
110 | + when false | |
111 | + failure += 1 | |
112 | + else | |
113 | + break # leave if no work could be done | |
114 | + end | |
115 | + break if $exit # leave if we're exiting | |
116 | + end | |
117 | + | |
118 | + return [success, failure] | |
119 | + end | |
120 | + | |
121 | + def run(job) | |
122 | + runtime = Benchmark.realtime do | |
123 | + Timeout.timeout(self.class.max_run_time.to_i) { job.invoke_job } | |
124 | + job.destroy | |
125 | + end | |
126 | + say "#{job.name} completed after %.4f" % runtime | |
127 | + return true # did work | |
128 | + rescue Exception => e | |
129 | + handle_failed_job(job, e) | |
130 | + return false # work failed | |
131 | + end | |
132 | + | |
133 | + # Reschedule the job in the future (when a job fails). | |
134 | + # Uses an exponential scale depending on the number of failed attempts. | |
135 | + def reschedule(job, time = nil) | |
136 | + if (job.attempts += 1) < self.class.max_attempts | |
137 | + time ||= Job.db_time_now + (job.attempts ** 4) + 5 | |
138 | + job.run_at = time | |
139 | + job.unlock | |
140 | + job.save! | |
141 | + else | |
142 | + say "PERMANENTLY removing #{job.name} because of #{job.attempts} consecutive failures.", Logger::INFO | |
143 | + | |
144 | + if job.payload_object.respond_to? :on_permanent_failure | |
145 | + say "Running on_permanent_failure hook" | |
146 | + failure_method = job.payload_object.method(:on_permanent_failure) | |
147 | + if failure_method.arity == 1 | |
148 | + failure_method.call(job) | |
149 | + else | |
150 | + failure_method.call | |
151 | + end | |
152 | + end | |
153 | + | |
154 | + self.class.destroy_failed_jobs ? job.destroy : job.update_attributes(:failed_at => Delayed::Job.db_time_now) | |
155 | + end | |
156 | + end | |
157 | + | |
158 | + def say(text, level = Logger::INFO) | |
159 | + text = "[Worker(#{name})] #{text}" | |
160 | + puts text unless @quiet | |
161 | + logger.add level, "#{Time.now.strftime('%FT%T%z')}: #{text}" if logger | |
162 | + end | |
163 | + | |
164 | + protected | |
165 | + | |
166 | + def handle_failed_job(job, error) | |
167 | + job.last_error = error.message + "\n" + error.backtrace.join("\n") | |
168 | + say "#{job.name} failed with #{error.class.name}: #{error.message} - #{job.attempts} failed attempts", Logger::ERROR | |
169 | + reschedule(job) | |
170 | + end | |
171 | + | |
172 | + # Run the next job we can get an exclusive lock on. | |
173 | + # If no jobs are left we return nil | |
174 | + def reserve_and_run_one_job | |
175 | + | |
176 | + # We get up to 5 jobs from the db. In case we cannot get exclusive access to a job we try the next. | |
177 | + # This leads to a more even distribution of jobs across the worker processes. | 
178 | + job = Delayed::Job.find_available(name, 5, self.class.max_run_time).detect do |job| | |
179 | + if job.lock_exclusively!(self.class.max_run_time, name) | |
180 | + say "acquired lock on #{job.name}" | |
181 | + true | |
182 | + else | |
183 | + say "failed to acquire exclusive lock for #{job.name}", Logger::WARN | |
184 | + false | |
185 | + end | |
186 | + end | |
187 | + | |
188 | + run(job) if job | |
189 | + end | |
190 | + end | |
191 | +end | ... | ... |
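The reschedule step above backs a failing job off on an exponential scale: the next run_at is db_time_now + attempts**4 + 5 seconds, and after max_attempts (25 by default) the job is destroyed or marked failed. The first few retry delays, in seconds:

    (1..5).map { |attempts| attempts**4 + 5 }   # => [6, 21, 86, 261, 630]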
... | ... | @@ -0,0 +1,40 @@ |
1 | +# These extensions allow proper serialization and autoloading of | 
2 | +# Classes, Modules and Structs. | 
3 | + | |
4 | +require 'yaml' | |
5 | + | |
6 | +class Module | |
7 | + yaml_as "tag:ruby.yaml.org,2002:module" | |
8 | + | |
9 | + def self.yaml_new(klass, tag, val) | |
10 | + val.constantize | |
11 | + end | |
12 | + | |
13 | + def to_yaml( opts = {} ) | |
14 | + YAML::quick_emit( nil, opts ) { |out| | |
15 | + out.scalar(taguri, self.name, :plain) | |
16 | + } | |
17 | + end | |
18 | + | |
19 | + def yaml_tag_read_class(name) | |
20 | + # Constantize the object so that ActiveSupport can attempt | |
21 | + # its auto loading magic. Will raise LoadError if not successful. | |
22 | + name.constantize | |
23 | + name | |
24 | + end | |
25 | + | |
26 | +end | |
27 | + | |
28 | +class Class | |
29 | + yaml_as "tag:ruby.yaml.org,2002:class" | |
30 | + remove_method :to_yaml # use Module's to_yaml | |
31 | +end | |
32 | + | |
33 | +class Struct | |
34 | + def self.yaml_tag_read_class(name) | |
35 | + # Constantize the object so that ActiveSupport can attempt | |
36 | + # its auto loading magic. Will raise LoadError if not successful. | |
37 | + name.constantize | |
38 | + "Struct::#{ name }" | |
39 | + end | |
40 | +end | ... | ... |
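With these extensions, bare classes and modules referenced inside a job's handler serialize to their name and are constantized (and therefore autoloaded) when the handler is deserialized. Roughly, under the Syck YAML engine this code targets:

    yaml = String.to_yaml   # a scalar tagged tag:ruby.yaml.org,2002:class with the value "String"
    YAML.load(yaml)         # => String, restored via yaml_new and constantize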
... | ... | @@ -0,0 +1,15 @@ |
1 | +require 'active_support' | |
2 | + | |
3 | +require File.dirname(__FILE__) + '/delayed/message_sending' | |
4 | +require File.dirname(__FILE__) + '/delayed/performable_method' | |
5 | +require File.dirname(__FILE__) + '/delayed/yaml_ext' | |
6 | +require File.dirname(__FILE__) + '/delayed/backend/base' | |
7 | +require File.dirname(__FILE__) + '/delayed/worker' | |
8 | +require File.dirname(__FILE__) + '/delayed/railtie' if defined?(::Rails::Railtie) | |
9 | + | |
10 | +Object.send(:include, Delayed::MessageSending) | |
11 | +Module.send(:include, Delayed::MessageSending::ClassMethods) | |
12 | + | |
13 | +if defined?(Merb::Plugins) | |
14 | + Merb::Plugins.add_rakefiles File.dirname(__FILE__) / 'delayed' / 'tasks' | |
15 | +end | ... | ... |
vendor/plugins/delayed_job/lib/generators/delayed_job/delayed_job_generator.rb
0 → 100644
... | ... | @@ -0,0 +1,34 @@ |
1 | +require 'rails/generators' | |
2 | +require 'rails/generators/migration' | |
3 | + | |
4 | +class DelayedJobGenerator < Rails::Generators::Base | |
5 | + | |
6 | + include Rails::Generators::Migration | |
7 | + | |
8 | + def self.source_root | |
9 | + @source_root ||= File.join(File.dirname(__FILE__), 'templates') | |
10 | + end | |
11 | + | |
12 | + # Implement the required interface for Rails::Generators::Migration. | |
13 | + # | |
14 | + def self.next_migration_number(dirname) #:nodoc: | |
15 | + next_migration_number = current_migration_number(dirname) + 1 | |
16 | + if ActiveRecord::Base.timestamped_migrations | |
17 | + [Time.now.utc.strftime("%Y%m%d%H%M%S"), "%.14d" % next_migration_number].max | |
18 | + else | |
19 | + "%.3d" % next_migration_number | |
20 | + end | |
21 | + end | |
22 | + | |
23 | + def create_script_file | |
24 | + template 'script', 'script/delayed_job' | |
25 | + chmod 'script/delayed_job', 0755 | |
26 | + end | |
27 | + | |
28 | + def create_migration_file | |
29 | + if defined?(ActiveRecord) | |
30 | + migration_template 'migration.rb', 'db/migrate/create_delayed_jobs.rb' | |
31 | + end | |
32 | + end | |
33 | + | |
34 | +end | |
0 | 35 | \ No newline at end of file | ... | ... |
vendor/plugins/delayed_job/lib/generators/delayed_job/templates/migration.rb
0 → 100644
... | ... | @@ -0,0 +1,21 @@ |
1 | +class CreateDelayedJobs < ActiveRecord::Migration | |
2 | + def self.up | |
3 | + create_table :delayed_jobs, :force => true do |table| | |
4 | + table.integer :priority, :default => 0 # Allows some jobs to jump to the front of the queue | |
5 | + table.integer :attempts, :default => 0 # Provides for retries, but still fail eventually. | |
6 | + table.text :handler # YAML-encoded string of the object that will do work | |
7 | + table.text :last_error # reason for last failure (See Note below) | |
8 | + table.datetime :run_at # When to run. Could be Time.zone.now for immediately, or sometime in the future. | |
9 | + table.datetime :locked_at # Set when a client is working on this object | |
10 | + table.datetime :failed_at # Set when all retries have failed (actually, by default, the record is deleted instead) | |
11 | + table.string :locked_by # Who is working on this object (if locked) | |
12 | + table.timestamps | |
13 | + end | |
14 | + | |
15 | + add_index :delayed_jobs, [:priority, :run_at], :name => 'delayed_jobs_priority' | |
16 | + end | |
17 | + | |
18 | + def self.down | |
19 | + drop_table :delayed_jobs | |
20 | + end | |
21 | +end | |
0 | 22 | \ No newline at end of file | ... | ... |
... | ... | @@ -0,0 +1 @@ |
1 | +require 'delayed_job' | ... | ... |
... | ... | @@ -0,0 +1 @@ |
1 | +require File.expand_path(File.join(File.dirname(__FILE__), '..', 'lib', 'delayed', 'recipes')) | ... | ... |
vendor/plugins/delayed_job/spec/backend/active_record_job_spec.rb
0 → 100644
... | ... | @@ -0,0 +1,46 @@ |
1 | +require 'spec_helper' | |
2 | +require 'backend/shared_backend_spec' | |
3 | +require 'delayed/backend/active_record' | |
4 | + | |
5 | +describe Delayed::Backend::ActiveRecord::Job do | |
6 | + before(:all) do | |
7 | + @backend = Delayed::Backend::ActiveRecord::Job | |
8 | + end | |
9 | + | |
10 | + before(:each) do | |
11 | + Delayed::Backend::ActiveRecord::Job.delete_all | |
12 | + SimpleJob.runs = 0 | |
13 | + end | |
14 | + | |
15 | + after do | |
16 | + Time.zone = nil | |
17 | + end | |
18 | + | |
19 | + it_should_behave_like 'a backend' | |
20 | + | |
21 | + context "db_time_now" do | |
22 | + it "should return time in current time zone if set" do | |
23 | + Time.zone = 'Eastern Time (US & Canada)' | |
24 | + %w(EST EDT).should include(Delayed::Job.db_time_now.zone) | |
25 | + end | |
26 | + | |
27 | + it "should return UTC time if that is the AR default" do | |
28 | + Time.zone = nil | |
29 | + ActiveRecord::Base.default_timezone = :utc | |
30 | + Delayed::Backend::ActiveRecord::Job.db_time_now.zone.should == 'UTC' | |
31 | + end | |
32 | + | |
33 | + it "should return local time if that is the AR default" do | |
34 | + Time.zone = 'Central Time (US & Canada)' | |
35 | + ActiveRecord::Base.default_timezone = :local | |
36 | + %w(CST CDT).should include(Delayed::Backend::ActiveRecord::Job.db_time_now.zone) | |
37 | + end | |
38 | + end | |
39 | + | |
40 | + describe "after_fork" do | |
41 | + it "should call reconnect on the connection" do | |
42 | + ActiveRecord::Base.connection.should_receive(:reconnect!) | |
43 | + Delayed::Backend::ActiveRecord::Job.after_fork | |
44 | + end | |
45 | + end | |
46 | +end | ... | ... |
vendor/plugins/delayed_job/spec/backend/couch_rest_job_spec.rb
0 → 100644
... | ... | @@ -0,0 +1,15 @@ |
1 | +require 'spec_helper' | |
2 | +require 'backend/shared_backend_spec' | |
3 | +require 'delayed/backend/couch_rest' | |
4 | + | |
5 | +describe Delayed::Backend::CouchRest::Job do | |
6 | + before(:all) do | |
7 | + @backend = Delayed::Backend::CouchRest::Job | |
8 | + end | |
9 | + | |
10 | + before(:each) do | |
11 | + @backend.delete_all | |
12 | + end | |
13 | + | |
14 | + it_should_behave_like 'a backend' | |
15 | +end | ... | ... |
vendor/plugins/delayed_job/spec/backend/data_mapper_job_spec.rb
0 → 100644
... | ... | @@ -0,0 +1,16 @@ |
1 | +require 'spec_helper' | |
2 | +require 'backend/shared_backend_spec' | |
3 | +require 'delayed/backend/data_mapper' | |
4 | + | |
5 | +describe Delayed::Backend::DataMapper::Job do | |
6 | + before(:all) do | |
7 | + @backend = Delayed::Backend::DataMapper::Job | |
8 | + end | |
9 | + | |
10 | + before(:each) do | |
11 | + # reset database before each example is run | |
12 | + DataMapper.auto_migrate! | |
13 | + end | |
14 | + | |
15 | + it_should_behave_like 'a backend' | |
16 | +end | ... | ... |
vendor/plugins/delayed_job/spec/backend/mongo_mapper_job_spec.rb
0 → 100644
... | ... | @@ -0,0 +1,94 @@ |
1 | +require 'spec_helper' | |
2 | +require 'backend/shared_backend_spec' | |
3 | +require 'delayed/backend/mongo_mapper' | |
4 | + | |
5 | +describe Delayed::Backend::MongoMapper::Job do | |
6 | + before(:all) do | |
7 | + @backend = Delayed::Backend::MongoMapper::Job | |
8 | + end | |
9 | + | |
10 | + before(:each) do | |
11 | + MongoMapper.database.collections.each(&:remove) | |
12 | + end | |
13 | + | |
14 | + it_should_behave_like 'a backend' | |
15 | + | |
16 | + describe "indexes" do | |
17 | + it "should have combo index on priority and run_at" do | |
18 | + @backend.collection.index_information.detect { |index| index[0] == 'priority_1_run_at_1' }.should_not be_nil | |
19 | + end | |
20 | + | |
21 | + it "should have index on locked_by" do | |
22 | + @backend.collection.index_information.detect { |index| index[0] == 'locked_by_1' }.should_not be_nil | |
23 | + end | |
24 | + end | |
25 | + | |
26 | + describe "delayed method" do | |
27 | + class MongoStoryReader | |
28 | + def read(story) | |
29 | + "Epilog: #{story.tell}" | |
30 | + end | |
31 | + end | |
32 | + | |
33 | + class MongoStory | |
34 | + include ::MongoMapper::Document | |
35 | + key :text, String | |
36 | + | |
37 | + def tell | |
38 | + text | |
39 | + end | |
40 | + end | |
41 | + | |
42 | + it "should ignore not found errors because they are permanent" do | |
43 | + story = MongoStory.create :text => 'Once upon a time...' | |
44 | + job = story.delay.tell | |
45 | + story.destroy | |
46 | + lambda { job.invoke_job }.should_not raise_error | |
47 | + end | |
48 | + | |
49 | + it "should store the object as string" do | |
50 | + story = MongoStory.create :text => 'Once upon a time...' | |
51 | + job = story.delay.tell | |
52 | + | |
53 | + job.payload_object.class.should == Delayed::PerformableMethod | |
54 | + job.payload_object.object.should == story | |
55 | + job.payload_object.method.should == :tell | |
56 | + job.payload_object.args.should == [] | |
57 | + job.payload_object.perform.should == 'Once upon a time...' | |
58 | + end | |
59 | + | |
60 | + it "should store arguments as string" do | |
61 | + story = MongoStory.create :text => 'Once upon a time...' | |
62 | + job = MongoStoryReader.new.delay.read(story) | |
63 | + job.payload_object.class.should == Delayed::PerformableMethod | |
64 | + job.payload_object.method.should == :read | |
65 | + job.payload_object.args.should == [story] | |
66 | + job.payload_object.perform.should == 'Epilog: Once upon a time...' | |
67 | + end | |
68 | + end | |
69 | + | |
70 | + describe "before_fork" do | |
71 | + after do | |
72 | + MongoMapper.connection.connect_to_master | |
73 | + end | |
74 | + | |
75 | + it "should disconnect" do | |
76 | + lambda do | |
77 | + Delayed::Backend::MongoMapper::Job.before_fork | |
78 | + end.should change { !!MongoMapper.connection.connected? }.from(true).to(false) | |
79 | + end | |
80 | + end | |
81 | + | |
82 | + describe "after_fork" do | |
83 | + before do | |
84 | + MongoMapper.connection.close | |
85 | + end | |
86 | + | |
87 | + it "should call reconnect" do | |
88 | + lambda do | |
89 | + Delayed::Backend::MongoMapper::Job.after_fork | |
90 | + end.should change { !!MongoMapper.connection.connected? }.from(false).to(true) | |
91 | + end | |
92 | + end | |
93 | + | |
94 | +end | ... | ... |
vendor/plugins/delayed_job/spec/backend/shared_backend_spec.rb
0 → 100644
... | ... | @@ -0,0 +1,279 @@ |
1 | +class NamedJob < Struct.new(:perform) | |
2 | + def display_name | |
3 | + 'named_job' | |
4 | + end | |
5 | +end | |
6 | + | |
7 | +shared_examples_for 'a backend' do | |
8 | + def create_job(opts = {}) | |
9 | + @backend.create(opts.merge(:payload_object => SimpleJob.new)) | |
10 | + end | |
11 | + | |
12 | + before do | |
13 | + Delayed::Worker.max_priority = nil | |
14 | + Delayed::Worker.min_priority = nil | |
15 | + Delayed::Worker.default_priority = 99 | |
16 | + SimpleJob.runs = 0 | |
17 | + end | |
18 | + | |
19 | + it "should set run_at automatically if not set" do | |
20 | + @backend.create(:payload_object => ErrorJob.new ).run_at.should_not be_nil | |
21 | + end | |
22 | + | |
23 | + it "should not set run_at automatically if already set" do | |
24 | + later = @backend.db_time_now + 5.minutes | |
25 | + @backend.create(:payload_object => ErrorJob.new, :run_at => later).run_at.should be_close(later, 1) | |
26 | + end | |
27 | + | |
28 | + it "should raise ArgumentError when handler doesn't respond_to :perform" do | |
29 | + lambda { @backend.enqueue(Object.new) }.should raise_error(ArgumentError) | |
30 | + end | |
31 | + | |
32 | + it "should increase count after enqueuing items" do | |
33 | + @backend.enqueue SimpleJob.new | |
34 | + @backend.count.should == 1 | |
35 | + end | |
36 | + | |
37 | + it "should be able to set priority when enqueuing items" do | |
38 | + @job = @backend.enqueue SimpleJob.new, 5 | |
39 | + @job.priority.should == 5 | |
40 | + end | |
41 | + | |
42 | + it "should use default priority when it is not set" do | |
43 | + @job = @backend.enqueue SimpleJob.new | |
44 | + @job.priority.should == 99 | |
45 | + end | |
46 | + | |
47 | + it "should be able to set run_at when enqueuing items" do | |
48 | + later = @backend.db_time_now + 5.minutes | |
49 | + @job = @backend.enqueue SimpleJob.new, 5, later | |
50 | + @job.run_at.should be_close(later, 1) | |
51 | + end | |
52 | + | |
53 | + it "should work with jobs in modules" do | |
54 | + M::ModuleJob.runs = 0 | |
55 | + job = @backend.enqueue M::ModuleJob.new | |
56 | + lambda { job.invoke_job }.should change { M::ModuleJob.runs }.from(0).to(1) | |
57 | + end | |
58 | + | |
59 | + describe "payload_object" do | |
60 | + it "should raise a DeserializationError when the job class is totally unknown" do | |
61 | + job = @backend.new :handler => "--- !ruby/object:JobThatDoesNotExist {}" | |
62 | + lambda { job.payload_object }.should raise_error(Delayed::Backend::DeserializationError) | |
63 | + end | |
64 | + | |
65 | + it "should raise a DeserializationError when the job struct is totally unknown" do | |
66 | + job = @backend.new :handler => "--- !ruby/struct:StructThatDoesNotExist {}" | |
67 | + lambda { job.payload_object }.should raise_error(Delayed::Backend::DeserializationError) | |
68 | + end | |
69 | + | |
70 | + it "should autoload classes that are unknown at runtime" do | |
71 | + job = @backend.new :handler => "--- !ruby/object:Autoloaded::Clazz {}" | |
72 | + lambda { job.payload_object }.should_not raise_error(Delayed::Backend::DeserializationError) | |
73 | + end | |
74 | + | |
75 | + it "should autoload structs that are unknown at runtime" do | |
76 | + job = @backend.new :handler => "--- !ruby/struct:Autoloaded::Struct {}" | |
77 | + lambda { job.payload_object }.should_not raise_error(Delayed::Backend::DeserializationError) | |
78 | + end | |
79 | + end | |
80 | + | |
81 | + describe "find_available" do | |
82 | + it "should not find failed jobs" do | |
83 | + @job = create_job :attempts => 50, :failed_at => @backend.db_time_now | |
84 | + @backend.find_available('worker', 5, 1.second).should_not include(@job) | |
85 | + end | |
86 | + | |
87 | + it "should not find jobs scheduled for the future" do | |
88 | + @job = create_job :run_at => (@backend.db_time_now + 1.minute) | |
89 | + @backend.find_available('worker', 5, 4.hours).should_not include(@job) | |
90 | + end | |
91 | + | |
92 | + it "should not find jobs locked by another worker" do | |
93 | + @job = create_job(:locked_by => 'other_worker', :locked_at => @backend.db_time_now - 1.minute) | |
94 | + @backend.find_available('worker', 5, 4.hours).should_not include(@job) | |
95 | + end | |
96 | + | |
97 | + it "should find open jobs" do | |
98 | + @job = create_job | |
99 | + @backend.find_available('worker', 5, 4.hours).should include(@job) | |
100 | + end | |
101 | + | |
102 | + it "should find expired jobs" do | |
103 | + @job = create_job(:locked_by => 'worker', :locked_at => @backend.db_time_now - 2.minutes) | |
104 | + @backend.find_available('worker', 5, 1.minute).should include(@job) | |
105 | + end | |
106 | + | |
107 | + it "should find own jobs" do | |
108 | + @job = create_job(:locked_by => 'worker', :locked_at => (@backend.db_time_now - 1.minutes)) | |
109 | + @backend.find_available('worker', 5, 4.hours).should include(@job) | |
110 | + end | |
111 | + | |
112 | + it "should find only the right amount of jobs" do | |
113 | + 10.times { create_job } | |
114 | + @backend.find_available('worker', 7, 4.hours).should have(7).jobs | |
115 | + end | |
116 | + end | |
117 | + | |
118 | + context "when another worker is already performing an task, it" do | |
119 | + | |
120 | + before :each do | |
121 | + @job = @backend.create :payload_object => SimpleJob.new, :locked_by => 'worker1', :locked_at => @backend.db_time_now - 5.minutes | |
122 | + end | |
123 | + | |
124 | + it "should not allow a second worker to get exclusive access" do | |
125 | + @job.lock_exclusively!(4.hours, 'worker2').should == false | |
126 | + end | |
127 | + | |
128 | + it "should allow a second worker to get exclusive access if the timeout has passed" do | |
129 | + @job.lock_exclusively!(1.minute, 'worker2').should == true | |
130 | + end | |
131 | + | |
132 | + it "should be able to get access to the task if it was started more then max_age ago" do | |
133 | + @job.locked_at = 5.hours.ago | |
134 | + @job.save | |
135 | + | |
136 | + @job.lock_exclusively! 4.hours, 'worker2' | |
137 | + @job.reload | |
138 | + @job.locked_by.should == 'worker2' | |
139 | + @job.locked_at.should > 1.minute.ago | |
140 | + end | |
141 | + | |
142 | + it "should not be found by another worker" do | |
143 | + @backend.find_available('worker2', 1, 6.minutes).length.should == 0 | |
144 | + end | |
145 | + | |
146 | + it "should be found by another worker if the time has expired" do | |
147 | + @backend.find_available('worker2', 1, 4.minutes).length.should == 1 | |
148 | + end | |
149 | + | |
150 | + it "should be able to get exclusive access again when the worker name is the same" do | |
151 | + @job.lock_exclusively!(5.minutes, 'worker1').should be_true | |
152 | + @job.lock_exclusively!(5.minutes, 'worker1').should be_true | |
153 | + @job.lock_exclusively!(5.minutes, 'worker1').should be_true | |
154 | + end | |
155 | + end | |
156 | + | |
157 | + context "when another worker has worked on a task since the job was found to be available, it" do | |
158 | + | |
159 | + before :each do | |
160 | + @job = @backend.create :payload_object => SimpleJob.new | |
161 | + @job_copy_for_worker_2 = @backend.find(@job.id) | |
162 | + end | |
163 | + | |
164 | + it "should not allow a second worker to get exclusive access if already successfully processed by worker1" do | |
165 | + @job.destroy | |
166 | + @job_copy_for_worker_2.lock_exclusively!(4.hours, 'worker2').should == false | |
167 | + end | |
168 | + | |
169 | + it "should not allow a second worker to get exclusive access if failed to be processed by worker1 and run_at time is now in future (due to backing off behaviour)" do | |
170 | + @job.update_attributes(:attempts => 1, :run_at => 1.day.from_now) | |
171 | + @job_copy_for_worker_2.lock_exclusively!(4.hours, 'worker2').should == false | |
172 | + end | |
173 | + end | |
174 | + | |
175 | + context "#name" do | |
176 | + it "should be the class name of the job that was enqueued" do | |
177 | + @backend.create(:payload_object => ErrorJob.new ).name.should == 'ErrorJob' | |
178 | + end | |
179 | + | |
180 | + it "should be the method that will be called if its a performable method object" do | |
181 | + job = @backend.new(:payload_object => NamedJob.new) | |
182 | + job.name.should == 'named_job' | |
183 | + end | |
184 | + | |
185 | + it "should be the instance method that will be called if its a performable method object" do | |
186 | + @job = Story.create(:text => "...").delay.save | |
187 | + @job.name.should == 'Story#save' | |
188 | + end | |
189 | + end | |
190 | + | |
191 | + context "worker prioritization" do | |
192 | + before(:each) do | |
193 | + Delayed::Worker.max_priority = nil | |
194 | + Delayed::Worker.min_priority = nil | |
195 | + end | |
196 | + | |
197 | + it "should fetch jobs ordered by priority" do | |
198 | + 10.times { @backend.enqueue SimpleJob.new, rand(10) } | |
199 | + jobs = @backend.find_available('worker', 10) | |
200 | + jobs.size.should == 10 | |
201 | + jobs.each_cons(2) do |a, b| | |
202 | + a.priority.should <= b.priority | |
203 | + end | |
204 | + end | |
205 | + | |
206 | + it "should only find jobs greater than or equal to min priority" do | |
207 | + min = 5 | |
208 | + Delayed::Worker.min_priority = min | |
209 | + 10.times {|i| @backend.enqueue SimpleJob.new, i } | |
210 | + jobs = @backend.find_available('worker', 10) | |
211 | + jobs.each {|job| job.priority.should >= min} | |
212 | + end | |
213 | + | |
214 | + it "should only find jobs less than or equal to max priority" do | |
215 | + max = 5 | |
216 | + Delayed::Worker.max_priority = max | |
217 | + 10.times {|i| @backend.enqueue SimpleJob.new, i } | |
218 | + jobs = @backend.find_available('worker', 10) | |
219 | + jobs.each {|job| job.priority.should <= max} | |
220 | + end | |
221 | + end | |
222 | + | |
223 | + context "clear_locks!" do | |
224 | + before do | |
225 | + @job = create_job(:locked_by => 'worker', :locked_at => @backend.db_time_now) | |
226 | + end | |
227 | + | |
228 | + it "should clear locks for the given worker" do | |
229 | + @backend.clear_locks!('worker') | |
230 | + @backend.find_available('worker2', 5, 1.minute).should include(@job) | |
231 | + end | |
232 | + | |
233 | + it "should not clear locks for other workers" do | |
234 | + @backend.clear_locks!('worker1') | |
235 | + @backend.find_available('worker1', 5, 1.minute).should_not include(@job) | |
236 | + end | |
237 | + end | |
238 | + | |
239 | + context "unlock" do | |
240 | + before do | |
241 | + @job = create_job(:locked_by => 'worker', :locked_at => @backend.db_time_now) | |
242 | + end | |
243 | + | |
244 | + it "should clear locks" do | |
245 | + @job.unlock | |
246 | + @job.locked_by.should be_nil | |
247 | + @job.locked_at.should be_nil | |
248 | + end | |
249 | + end | |
250 | + | |
251 | + context "large handler" do | |
252 | + before do | |
253 | + text = "Lorem ipsum dolor sit amet. " * 1000 | |
254 | + @job = @backend.enqueue Delayed::PerformableMethod.new(text, :length, {}) | |
255 | + end | |
256 | + | |
257 | + it "should have an id" do | |
258 | + @job.id.should_not be_nil | |
259 | + end | |
260 | + end | |
261 | + | |
262 | + describe "yaml serialization" do | |
263 | + it "should reload changed attributes" do | |
264 | + job = @backend.enqueue SimpleJob.new | |
265 | + yaml = job.to_yaml | |
266 | + job.priority = 99 | |
267 | + job.save | |
268 | + YAML.load(yaml).priority.should == 99 | |
269 | + end | |
270 | + | |
271 | + it "should ignore destroyed records" do | |
272 | + job = @backend.enqueue SimpleJob.new | |
273 | + yaml = job.to_yaml | |
274 | + job.destroy | |
275 | + lambda { YAML.load(yaml).should be_nil }.should_not raise_error | |
276 | + end | |
277 | + end | |
278 | + | |
279 | +end | ... | ... |
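The shared examples above pin down the enqueue signature every backend must support: a payload object responding to #perform, an optional priority, and an optional run_at. Using the SimpleJob class from the spec support files:

    Delayed::Job.enqueue SimpleJob.new                           # default priority, run as soon as possible
    Delayed::Job.enqueue SimpleJob.new, 5                        # priority 5
    Delayed::Job.enqueue SimpleJob.new, 5, 10.minutes.from_now   # priority 5, scheduled for later
    Delayed::Job.enqueue Object.new                              # raises ArgumentError (no #perform)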
... | ... | @@ -0,0 +1,51 @@ |
1 | +require 'spec_helper' | |
2 | + | |
3 | +describe Delayed::MessageSending do | |
4 | + describe "handle_asynchronously" do | |
5 | + class Story < ActiveRecord::Base | |
6 | + def tell!(arg) | |
7 | + end | |
8 | + handle_asynchronously :tell! | |
9 | + end | |
10 | + | |
11 | + it "should alias original method" do | |
12 | + Story.new.should respond_to(:tell_without_delay!) | |
13 | + Story.new.should respond_to(:tell_with_delay!) | |
14 | + end | |
15 | + | |
16 | + it "should create a PerformableMethod" do | |
17 | + story = Story.create! | |
18 | + lambda { | |
19 | + job = story.tell!(1) | |
20 | + job.payload_object.class.should == Delayed::PerformableMethod | |
21 | + job.payload_object.method.should == :tell_without_delay! | |
22 | + job.payload_object.args.should == [1] | |
23 | + }.should change { Delayed::Job.count } | |
24 | + end | |
25 | + end | |
26 | + | |
27 | + context "delay" do | |
28 | + it "should create a new PerformableMethod job" do | |
29 | + lambda { | |
30 | + job = "hello".delay.count('l') | |
31 | + job.payload_object.class.should == Delayed::PerformableMethod | |
32 | + job.payload_object.method.should == :count | |
33 | + job.payload_object.args.should == ['l'] | |
34 | + }.should change { Delayed::Job.count }.by(1) | |
35 | + end | |
36 | + | |
37 | + it "should set default priority" do | |
38 | + Delayed::Worker.default_priority = 99 | |
39 | + job = Object.delay.to_s | |
40 | + job.priority.should == 99 | |
41 | + Delayed::Worker.default_priority = 0 | |
42 | + end | |
43 | + | |
44 | + it "should set job options" do | |
45 | + run_at = Time.parse('2010-05-03 12:55 AM') | |
46 | + job = Object.delay(:priority => 20, :run_at => run_at).to_s | |
47 | + job.run_at.should == run_at | |
48 | + job.priority.should == 20 | |
49 | + end | |
50 | + end | |
51 | +end | ... | ... |
vendor/plugins/delayed_job/spec/performable_method_spec.rb
0 → 100644
... | ... | @@ -0,0 +1,48 @@ |
1 | +require 'spec_helper' | |
2 | + | |
3 | +describe Delayed::PerformableMethod do | |
4 | + describe "perform" do | |
5 | + before do | |
6 | + @method = Delayed::PerformableMethod.new("foo", :count, ['o']) | |
7 | + end | |
8 | + | |
9 | + context "with the persisted record cannot be found" do | |
10 | + before do | |
11 | + @method.object = nil | |
12 | + end | |
13 | + | |
14 | + it "should be a no-op if object is nil" do | |
15 | + lambda { @method.perform }.should_not raise_error | |
16 | + end | |
17 | + end | |
18 | + | |
19 | + it "should call the method on the object" do | |
20 | + @method.object.should_receive(:count).with('o') | |
21 | + @method.perform | |
22 | + end | |
23 | + | |
24 | + it "should respond to on_permanent_failure when implemented and target object is called via object.delay.do_something" do | |
25 | + @method = Delayed::PerformableMethod.new(OnPermanentFailureJob.new, :perform, []) | |
26 | + @method.respond_to?(:on_permanent_failure).should be_true | |
27 | + @method.object.should_receive(:on_permanent_failure) | |
28 | + @method.on_permanent_failure | |
29 | + end | |
30 | + end | |
31 | + | |
32 | + it "should raise a NoMethodError if target method doesn't exist" do | |
33 | + lambda { | |
34 | + Delayed::PerformableMethod.new(Object, :method_that_does_not_exist, []) | |
35 | + }.should raise_error(NoMethodError) | |
36 | + end | |
37 | + | |
38 | + it "should not raise NoMethodError if target method is private" do | |
39 | + clazz = Class.new do | |
40 | + def private_method | |
41 | + end | |
42 | + private :private_method | |
43 | + end | |
44 | + lambda { | |
45 | + Delayed::PerformableMethod.new(clazz.new, :private_method, []) | |
46 | + }.should_not raise_error(NoMethodError) | |
47 | + end | |
48 | +end | ... | ... |
... | ... | @@ -0,0 +1,25 @@ |
1 | +class SimpleJob | |
2 | + cattr_accessor :runs; self.runs = 0 | |
3 | + def perform; @@runs += 1; end | |
4 | +end | |
5 | + | |
6 | +class ErrorJob | |
7 | + cattr_accessor :runs; self.runs = 0 | |
8 | + def perform; raise 'did not work'; end | |
9 | +end | |
10 | + | |
11 | +class LongRunningJob | |
12 | + def perform; sleep 250; end | |
13 | +end | |
14 | + | |
15 | +class OnPermanentFailureJob < SimpleJob | |
16 | + def on_permanent_failure | |
17 | + end | |
18 | +end | |
19 | + | |
20 | +module M | |
21 | + class ModuleJob | |
22 | + cattr_accessor :runs; self.runs = 0 | |
23 | + def perform; @@runs += 1; end | |
24 | + end | |
25 | +end | ... | ... |
... | ... | @@ -0,0 +1,33 @@ |
1 | +require 'active_record' | |
2 | + | |
3 | +ActiveRecord::Base.establish_connection(:adapter => 'sqlite3', :database => ':memory:') | |
4 | +ActiveRecord::Base.logger = Delayed::Worker.logger | |
5 | +ActiveRecord::Migration.verbose = false | |
6 | + | |
7 | +ActiveRecord::Schema.define do | |
8 | + create_table :delayed_jobs, :force => true do |table| | |
9 | + table.integer :priority, :default => 0 | |
10 | + table.integer :attempts, :default => 0 | |
11 | + table.text :handler | |
12 | + table.text :last_error | |
13 | + table.datetime :run_at | |
14 | + table.datetime :locked_at | |
15 | + table.datetime :failed_at | |
16 | + table.string :locked_by | |
17 | + table.timestamps | |
18 | + end | |
19 | + | |
20 | + add_index :delayed_jobs, [:priority, :run_at], :name => 'delayed_jobs_priority' | |
21 | + | |
22 | + create_table :stories, :force => true do |table| | |
23 | + table.string :text | |
24 | + end | |
25 | +end | |
26 | + | |
27 | +# Purely useful for test cases... | |
28 | +class Story < ActiveRecord::Base | |
29 | + def tell; text; end | |
30 | + def whatever(n, _); tell*n; end | |
31 | + | |
32 | + handle_asynchronously :whatever | |
33 | +end | ... | ... |
... | ... | @@ -0,0 +1,7 @@ |
1 | +require 'couchrest' | |
2 | +require 'delayed/backend/couch_rest' | |
3 | + | |
4 | +Delayed::Backend::CouchRest::Job.use_database CouchRest::Server.new.database!('delayed_job_spec') | |
5 | + | |
6 | +# try to perform a query to check that we can connect | |
7 | +Delayed::Backend::CouchRest::Job.all | |
0 | 8 | \ No newline at end of file | ... | ... |
... | ... | @@ -0,0 +1,17 @@ |
1 | +require 'mongo_mapper' | |
2 | + | |
3 | +MongoMapper.config = { | |
4 | + RAILS_ENV => {'database' => 'delayed_job'} | |
5 | +} | |
6 | +MongoMapper.connect RAILS_ENV | |
7 | + | |
8 | +unless defined?(Story) | |
9 | + class Story | |
10 | + include ::MongoMapper::Document | |
11 | + def tell; text; end | |
12 | + def whatever(n, _); tell*n; end | |
13 | + def self.count; end | |
14 | + | |
15 | + handle_asynchronously :whatever | |
16 | + end | |
17 | +end | ... | ... |
... | ... | @@ -0,0 +1,31 @@ |
1 | +$:.unshift(File.dirname(__FILE__) + '/../lib') | |
2 | + | |
3 | +require 'rubygems' | |
4 | +require 'spec' | |
5 | +require 'logger' | |
6 | + | |
7 | +gem 'activerecord', ENV['RAILS_VERSION'] if ENV['RAILS_VERSION'] | |
8 | + | |
9 | +require 'delayed_job' | |
10 | +require 'sample_jobs' | |
11 | + | |
12 | +Delayed::Worker.logger = Logger.new('/tmp/dj.log') | |
13 | +RAILS_ENV = 'test' | |
14 | + | |
15 | +# determine the available backends | |
16 | +BACKENDS = [] | |
17 | +Dir.glob("#{File.dirname(__FILE__)}/setup/*.rb") do |backend| | |
18 | + begin | |
19 | + backend = File.basename(backend, '.rb') | |
20 | + require "setup/#{backend}" | |
21 | + require "backend/#{backend}_job_spec" | |
22 | + BACKENDS << backend.to_sym | |
23 | + rescue Exception | |
24 | + puts "Unable to load #{backend} backend: #{$!}" | |
25 | + end | |
26 | +end | |
27 | + | |
28 | +Delayed::Worker.backend = BACKENDS.first | |
29 | + | |
30 | +# Add this directory so the ActiveSupport autoloading works | |
31 | +ActiveSupport::Dependencies.load_paths << File.dirname(__FILE__) | ... | ... |
... | ... | @@ -0,0 +1,214 @@ |
1 | +require 'spec_helper' | |
2 | + | |
3 | +describe Delayed::Worker do | |
4 | + def job_create(opts = {}) | |
5 | + Delayed::Job.create(opts.merge(:payload_object => SimpleJob.new)) | |
6 | + end | |
7 | + | |
8 | + describe "backend=" do | |
9 | + before do | |
10 | + @clazz = Class.new | |
11 | + Delayed::Worker.backend = @clazz | |
12 | + end | |
13 | + | |
14 | + it "should set the Delayed::Job constant to the backend" do | |
15 | + Delayed::Job.should == @clazz | |
16 | + end | |
17 | + | |
18 | + it "should set backend with a symbol" do | |
19 | + Delayed::Worker.backend = :active_record | |
20 | + Delayed::Worker.backend.should == Delayed::Backend::ActiveRecord::Job | |
21 | + end | |
22 | + end | |
23 | + | |
24 | + BACKENDS.each do |backend| | |
25 | + describe "with the #{backend} backend" do | |
26 | + before do | |
27 | + Delayed::Worker.backend = backend | |
28 | + Delayed::Job.delete_all | |
29 | + | |
30 | + @worker = Delayed::Worker.new(:max_priority => nil, :min_priority => nil, :quiet => true) | |
31 | + | |
32 | + SimpleJob.runs = 0 | |
33 | + end | |
34 | + | |
35 | + describe "running a job" do | |
36 | + it "should fail after Worker.max_run_time" do | |
37 | + begin | |
38 | + old_max_run_time = Delayed::Worker.max_run_time | |
39 | + Delayed::Worker.max_run_time = 1.second | |
40 | + @job = Delayed::Job.create :payload_object => LongRunningJob.new | |
41 | + @worker.run(@job) | |
42 | + @job.reload.last_error.should =~ /expired/ | |
43 | + @job.attempts.should == 1 | |
44 | + ensure | |
45 | + Delayed::Worker.max_run_time = old_max_run_time | |
46 | + end | |
47 | + end | |
48 | + end | |
49 | + | |
50 | + context "worker prioritization" do | |
51 | + before(:each) do | |
52 | + @worker = Delayed::Worker.new(:max_priority => 5, :min_priority => -5, :quiet => true) | |
53 | + end | |
54 | + | |
55 | + it "should only work_off jobs that are >= min_priority" do | |
56 | + job_create(:priority => -10) | |
57 | + job_create(:priority => 0) | |
58 | + @worker.work_off | |
59 | + | |
60 | + SimpleJob.runs.should == 1 | |
61 | + end | |
62 | + | |
63 | + it "should only work_off jobs that are <= max_priority" do | |
64 | + job_create(:priority => 10) | |
65 | + job_create(:priority => 0) | |
66 | + | |
67 | + @worker.work_off | |
68 | + | |
69 | + SimpleJob.runs.should == 1 | |
70 | + end | |
71 | + end | |
72 | + | |
73 | + context "while running with locked and expired jobs" do | |
74 | + before(:each) do | |
75 | + @worker.name = 'worker1' | |
76 | + end | |
77 | + | |
78 | + it "should not run jobs locked by another worker" do | |
79 | + job_create(:locked_by => 'other_worker', :locked_at => (Delayed::Job.db_time_now - 1.minutes)) | |
80 | + lambda { @worker.work_off }.should_not change { SimpleJob.runs } | |
81 | + end | |
82 | + | |
83 | + it "should run open jobs" do | |
84 | + job_create | |
85 | + lambda { @worker.work_off }.should change { SimpleJob.runs }.from(0).to(1) | |
86 | + end | |
87 | + | |
88 | + it "should run expired jobs" do | |
89 | + expired_time = Delayed::Job.db_time_now - (1.minutes + Delayed::Worker.max_run_time) | |
90 | + job_create(:locked_by => 'other_worker', :locked_at => expired_time) | |
91 | + lambda { @worker.work_off }.should change { SimpleJob.runs }.from(0).to(1) | |
92 | + end | |
93 | + | |
94 | + it "should run own jobs" do | |
95 | + job_create(:locked_by => @worker.name, :locked_at => (Delayed::Job.db_time_now - 1.minutes)) | |
96 | + lambda { @worker.work_off }.should change { SimpleJob.runs }.from(0).to(1) | |
97 | + end | |
98 | + end | |
99 | + | |
100 | + describe "failed jobs" do | |
101 | + before do | |
102 | + # reset defaults | |
103 | + Delayed::Worker.destroy_failed_jobs = true | |
104 | + Delayed::Worker.max_attempts = 25 | |
105 | + | |
106 | + @job = Delayed::Job.enqueue ErrorJob.new | |
107 | + end | |
108 | + | |
109 | + it "should record last_error when destroy_failed_jobs = false, max_attempts = 1" do | |
110 | + Delayed::Worker.destroy_failed_jobs = false | |
111 | + Delayed::Worker.max_attempts = 1 | |
112 | + @worker.run(@job) | |
113 | + @job.reload | |
114 | + @job.last_error.should =~ /did not work/ | |
115 | + @job.last_error.should =~ /worker_spec.rb/ | |
116 | + @job.attempts.should == 1 | |
117 | + @job.failed_at.should_not be_nil | |
118 | + end | |
119 | + | |
120 | + it "should re-schedule jobs after failing" do | |
121 | + @worker.run(@job) | |
122 | + @job.reload | |
123 | + @job.last_error.should =~ /did not work/ | |
124 | + @job.last_error.should =~ /sample_jobs.rb:8:in `perform'/ | |
125 | + @job.attempts.should == 1 | |
126 | + @job.run_at.should > Delayed::Job.db_time_now - 10.minutes | |
127 | + @job.run_at.should < Delayed::Job.db_time_now + 10.minutes | |
128 | + end | |
129 | + end | |
130 | + | |
131 | + context "reschedule" do | |
132 | + before do | |
133 | + @job = Delayed::Job.create :payload_object => SimpleJob.new | |
134 | + end | |
135 | + | |
136 | + share_examples_for "any failure more than Worker.max_attempts times" do | |
137 | + context "when the job's payload has an #on_permanent_failure hook" do | |
138 | + before do | |
139 | + @job = Delayed::Job.create :payload_object => OnPermanentFailureJob.new | |
140 | + @job.payload_object.should respond_to :on_permanent_failure | |
141 | + end | |
142 | + | |
143 | + it "should run that hook" do | |
144 | + @job.payload_object.should_receive :on_permanent_failure | |
145 | + Delayed::Worker.max_attempts.times { @worker.reschedule(@job) } | |
146 | + end | |
147 | + end | |
148 | + | |
149 | + context "when the job's payload has no #on_permanent_failure hook" do | |
150 | + # It's a little tricky to test this in a straightforward way, | |
151 | + # because putting a should_not_receive expectation on | |
152 | + # @job.payload_object.on_permanent_failure makes that object | |
153 | + # incorrectly return true to | |
154 | + # payload_object.respond_to? :on_permanent_failure, which is what | |
155 | + # reschedule uses to decide whether to call on_permanent_failure. | |
156 | + # So instead, we just make sure that the payload_object as it | |
157 | + # already stands doesn't respond_to? on_permanent_failure, then | |
158 | + # shove it through the iterated reschedule loop and make sure we | |
159 | + # don't get a NoMethodError (caused by calling that nonexistent | |
160 | + # on_permanent_failure method). | |
161 | + | |
162 | + before do | |
163 | + @job.payload_object.should_not respond_to(:on_permanent_failure) | |
164 | + end | |
165 | + | |
166 | + it "should not try to run that hook" do | |
167 | + lambda do | |
168 | + Delayed::Worker.max_attempts.times { @worker.reschedule(@job) } | |
169 | + end.should_not raise_exception(NoMethodError) | |
170 | + end | |
171 | + end | |
172 | + end | |
173 | + | |
174 | + context "and we want to destroy jobs" do | |
175 | + before do | |
176 | + Delayed::Worker.destroy_failed_jobs = true | |
177 | + end | |
178 | + | |
179 | + it_should_behave_like "any failure more than Worker.max_attempts times" | |
180 | + | |
181 | + it "should be destroyed if it failed more than Worker.max_attempts times" do | |
182 | + @job.should_receive(:destroy) | |
183 | + Delayed::Worker.max_attempts.times { @worker.reschedule(@job) } | |
184 | + end | |
185 | + | |
186 | + it "should not be destroyed if failed fewer than Worker.max_attempts times" do | |
187 | + @job.should_not_receive(:destroy) | |
188 | + (Delayed::Worker.max_attempts - 1).times { @worker.reschedule(@job) } | |
189 | + end | |
190 | + end | |
191 | + | |
192 | + context "and we don't want to destroy jobs" do | |
193 | + before do | |
194 | + Delayed::Worker.destroy_failed_jobs = false | |
195 | + end | |
196 | + | |
197 | + it_should_behave_like "any failure more than Worker.max_attempts times" | |
198 | + | |
199 | + it "should be failed if it failed more than Worker.max_attempts times" do | |
200 | + @job.reload.failed_at.should == nil | |
201 | + Delayed::Worker.max_attempts.times { @worker.reschedule(@job) } | |
202 | + @job.reload.failed_at.should_not == nil | |
203 | + end | |
204 | + | |
205 | + it "should not be failed if it failed fewer than Worker.max_attempts times" do | |
206 | + (Delayed::Worker.max_attempts - 1).times { @worker.reschedule(@job) } | |
207 | + @job.reload.failed_at.should == nil | |
208 | + end | |
209 | + end | |
210 | + end | |
211 | + end | |
212 | + end | |
213 | + | |
214 | +end | ... | ... |
... | ... | @@ -0,0 +1 @@ |
1 | +require File.expand_path(File.join(File.dirname(__FILE__), '..', 'lib', 'delayed', 'tasks')) | ... | ... |