Assume we have the following code (we are using Pundit for authorization and ActiveModelSerializers to format the JSON response):
 

class User < ApplicationRecord
  has_many :projects
end

class Project < ApplicationRecord
end
 
class ProjectPolicy < ApplicationPolicy
  attr_accessor :user, :project
 
  def initialize(user, project)
    @user    = user
    @project = project
  end
 
  def show?
    user.member_of?(project)
  end
 
  def destroy?
    user.admin_of?(project)
  end
end
 
class ProjectsController < ApplicationController
  before_action :set_project

  def show
    authorize @project
    render json: @project
  end

  def destroy
    authorize @project
    @project.destroy
    head :no_content
  end

  private

  def set_project
    @project = Project.find(params[:id])
  end
end
 
class ProjectSerializer < ActiveModel::Serializer
  attributes :name, :description
end
When we request a project resource, we will get:
 
# http://myapp/projects/1
{
  "project": {
    "id": 1,
    "name": "My project",
    "description": "My project description"
  }
}
When a user tries to perform any action on a project, we first authorize the user to make sure they can perform that action on that project. Everything seems fine.

But what about client-side validation? If the mobile app or the JavaScript code in the browser consuming this API wants to validate permissions on the client side, for a better user experience and to save the backend from useless requests, then we have to return the policy with the JSON response, like this:
 
class ProjectSerializer < ActiveModel::Serializer
  attributes :name, :description, :policy

  def policy
    project_policy = ProjectPolicy.new(scope.current_user, object)
    {
      show: project_policy.show?,
      destroy: project_policy.destroy?
    }
  end
end
And in case you don't already do this, add the following line to ApplicationController, so that serializers can use the methods available in Rails HTML views (like current_user, which will then be available through scope):
serialization_scope :view_context # in application_controller.rb
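For context, a minimal ApplicationController under these assumptions (Pundit mixed into the controller, and current_user provided by your authentication layer) could look like this:

class ApplicationController < ActionController::Base
  include Pundit # provides `authorize` in controllers and the `policy` helper in views

  # Expose view helpers such as current_user and policy to serializers through `scope`
  serialization_scope :view_context
end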

# http://myapp/projects/1

{
  "project": {
    "id": 1,
    "name": "My project",
    "description": "My project description",
    "policy": {
      "show": true,
      "destroy": false
    }
  }
}
But then we have to remember that whenever we add a method to our policy class and use it in our controller, we also have to add that method to the JSON response, so that the API consumer is aware of this policy.

The problem is that we may easily forget to do this, and new developers contributing to our code will most likely forget, since it is not an intuitive step: they must be told about it and they must remember it. That is a likely source of bugs.

A simple solution I used is Ruby metaprogramming to add the whole policy automatically to the API response. Here is the black magic code (placed in an initializer):
 
ActiveModel::Serializer.class_eval do
  def self.add_policies
    attribute :policy

    def policy
      # `scope` is the view context, so Pundit's `policy` helper is available here
      policy_instance = scope.policy(object)
      policy_instance.as_json
    end
  end
end
And override as_json in ApplicationPolicy so that it returns only the public methods ending with a question mark (which indicates they are policy methods):
 
class ApplicationPolicy
  def as_json(*)
    policies = {}
    public_methods(false).select { |method| method.to_s =~ /\?$/ }.each do |policy_method|
      policies[policy_method.to_s.sub(/\?$/, '')] = send(policy_method)
    end
    policies
  end
end
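For the ProjectPolicy above, as_json then returns a plain hash of abilities; for example, for a user who is a member of the project but not an admin:

policy = ProjectPolicy.new(current_user, project)
policy.as_json
# => { "show" => true, "destroy" => false }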
Then in ProjectSerializer, we just call add_policies:
 
class ProjectSerializer < ActiveModel::Serializer
  add_policies
  attributes :name, :description
end
And we have all our policy methods automatically added to the project JSON object.

Today I faced the task of saving documents with Paperclip to AWS S3, but I needed to do the upload in the background so as not to block the user. I didn't want to use delayed_paperclip, since it only works with Delayed::Job or Resque; I wanted something that would run with any ActiveJob adapter.

My idea was originally inspired by an existing solution, but that solution has points that can be improved: you need to create another local attachment for each attachment you add to your model, you may forget to add the migration for it, you may forget to add it to the model, and finally it adds more columns to your table than needed.

The idea I used is as follows:
- Before saving the attachment, switch the storage to Filesystem, so that the document is written locally (not in the public directory) instead of to S3
- After saving the attachment, enqueue a background job that uploads the document to AWS S3

The code is very basic, so it can be enhanced, but this is the core idea, and it has been working fine so far.

 

# In your model:
has_attached_file :attachment, process_in_background: true
# Then use Paperclip normally; you don't have to do or change anything else!
my_model.attachment = some_file


# Put this code in the initializers directory
module Extensions
  module Paperclip
    module Attachment
      attr_accessor :uploading_to_s3

      def uploading_to_s3?
        uploading_to_s3
      end
    end

    module S3BackgroundHandler
      extend ActiveSupport::Concern

      included do
        class << self
          def has_attached_file(name, options = {})
            super(name, options)

            all_options = ::Paperclip::Attachment.default_options.merge(options)
            process_in_background(name) if all_options[:process_in_background]
          end

          def process_in_background(name)
            original_options = {}
            # Save to disk only when the attachment actually changed and we are not
            # already inside the background job that uploads it to S3.
            save_to_disk = Proc.new { |name, attachment| send("#{name}_updated_at_changed?") && !attachment.uploading_to_s3? }

            before_save do
              attachment = send(name)
              if instance_exec(name, attachment, &save_to_disk)
                # Temporarily switch the attachment to filesystem storage so the file
                # is written locally instead of being uploaded to S3.
                original_options = attachment.options.dup
                attachment.options.merge!(path: LOCAL_PAPERCLIP_ATTACHMENT_PATH)
                attachment.extend ::Paperclip::Storage::Filesystem
              end
            end

            after_save do
              attachment = send(name)
              if instance_exec(name, attachment, &save_to_disk)
                # Hand the real S3 upload (and any pending deletions) off to a job,
                # then restore the original S3 options on the attachment.
                queued_for_delete = attachment.instance_variable_get(:@queued_for_delete)
                UploadDocumentToS3Job.perform_later(self.class.name, self.id, name.to_s, queued_for_delete)
                queued_for_delete.clear
                attachment.options.merge!(original_options)
              end
            end
          end

        end
      end

      LOCAL_PAPERCLIP_ATTACHMENT_PATH = ":rails_root/files/:class/:attachment/:id_partition/:style/:filename"

    end

  end
end

Paperclip::Attachment.include Extensions::Paperclip::Attachment
ApplicationRecord.include Extensions::Paperclip::S3BackgroundHandler


# Put this code in the jobs directory
class UploadDocumentToS3Job < ApplicationJob
  queue_as :upload_document_to_s3

  def perform(class_name, id, attachment_name, queued_for_delete)
    record = class_name.constantize.find(id)
    attachment = record.send(attachment_name)

    # Deleting: remove any files Paperclip queued for deletion directly from S3.
    s3_bucket = attachment.s3_bucket
    queued_for_delete.each do |path|
      begin
        attachment.send(:log, "deleting #{path}")
        s3_bucket.object(path.sub(%r{\A/}, "")).delete
      rescue Aws::Errors::ServiceError
        # Ignore delete failures; the object may already be gone.
      end
    end

    # Uploading: temporarily point the attachment at the local path used by the
    # before_save hook, so we can locate the file that was written to disk.
    original_options = attachment.options.dup
    attachment.options.merge!(path: ApplicationRecord::LOCAL_PAPERCLIP_ATTACHMENT_PATH)
    file_path = attachment.path
    attachment.options.merge!(original_options)

    File.open(file_path) do |file|
      # The flag stops the model callbacks from switching back to local storage
      # while reprocess! saves the record and uploads the styles to S3.
      attachment.uploading_to_s3 = true
      attachment.assign(file)
      attachment.reprocess!
      attachment.uploading_to_s3 = false
    end
    # Clean up the local copy.
    File.delete(file_path) rescue nil
  end

end
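Since UploadDocumentToS3Job is a plain ActiveJob, it runs on whatever queue adapter the application is configured with; for example, a minimal setup assuming Sidekiq as the backend (any adapter works):

# config/application.rb
config.active_job.queue_adapter = :sidekiq
# Remember to add the :upload_document_to_s3 queue to your worker's queue list
# (e.g. in config/sidekiq.yml).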

When inserting a large amount of data into the DB from Rails, issuing a large number of INSERT statements takes a lot of time. We can reduce this time dramatically using mysqlimport, the MySQL tool for loading bulk data quickly; quoting the MySQL documentation, it "reads rows from a text file into a table at a very high speed". To use mysqlimport for updating existing records as well, not only for inserting new ones, we used a common trick: create a temporary table, bulk-insert the updated records (with their ids) into it, join the temporary table with the target table on the id column, update the target table's columns accordingly, and then drop the temporary table. You can find this idea illustrated in various places online.
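Concretely, the update path looks roughly like this (sketched with a hypothetical users table and a couple of its columns); this is exactly what the helper class below does:

conn = ActiveRecord::Base.connection
conn.execute("CREATE TABLE temp_users LIKE users")
# ... bulk-load the changed rows (including their ids) into temp_users with mysqlimport ...
conn.execute(<<-SQL)
  UPDATE users
  INNER JOIN temp_users ON users.id = temp_users.id
  SET users.name = temp_users.name, users.email = temp_users.email
SQL
conn.execute("DROP TABLE temp_users")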


So here is a helper class to use this within a Rails application:
 
class SqlWriter

  ID_STR = 'id'
  CREATED_AT_STR = 'created_at'
  UPDATED_AT_STR = 'updated_at'
  NULL_STR = '\N'
  COMMA_STR = ','
  attr_accessor :insert_sql_file, :update_sql_file
  # klass is the class of the records we will deal with.
  # sql_dir_path is the directory which will contain the sql data files (text files).
  def initialize(klass, sql_dir_path)
    @klass = klass
    @temp_table_name = "temp_#{klass.table_name}_#{Time.now.to_s(:db).gsub(/-| |:/,'_')}_#{SecureRandom.hex[0..10]}"
    # mysqlimport derives the target table from the data file name,
    # so each file is named after the table it will be loaded into.
    @insert_sql_file = File.new("#{sql_dir_path}/#{klass.table_name}.txt", 'w')
    @update_sql_file = File.new("#{sql_dir_path}/#{@temp_table_name}.txt", 'w')
    @current_time_in_db_format = Time.now.to_s(:db)
    @insert_fields = klass.new.attributes.except(ID_STR).keys
    @update_fields = klass.new.attributes.keys
    @records_need_update = false
  end

  def write_record_to_sql_file(record)
    row_data = get_sql_row(record)
    if record.new_record?
      @insert_sql_file.write("#{row_data}\n")
    else
      @update_sql_file.write("#{row_data}\n")
    end
  end

  def insert_records_to_database
    @insert_sql_file.close
    @update_sql_file.close
    config   = Rails.configuration.database_configuration
    database = config[Rails.env]["database"]
    username = config[Rails.env]["username"]
    password = config[Rails.env]["password"]
    host = config[Rails.env]["host"]
    insert_columns_orders = @insert_fields.join(',')
    `mysqlimport -u #{username} -p#{password} -h #{host} --columns='#{insert_columns_orders}' --local --fields-terminated-by=',' #{database} #{Shellwords.escape(@insert_sql_file.path)}`
    if @records_need_update
      ActiveRecord::Base.connection.execute("CREATE TABLE #{@temp_table_name} LIKE #{@klass.table_name};")
      update_columns_orders = @update_fields.join(',')
      `mysqlimport -u #{username} -p#{password} -h #{host} --columns='#{update_columns_orders}' --local --fields-terminated-by=',' #{database} #{Shellwords.escape(@update_sql_file.path)}`
      set_fields = @insert_fields.map{|field| "#{@klass.table_name}.#{field}=#{@temp_table_name}.#{field}"}.join(',')
      ActiveRecord::Base.connection.execute("UPDATE #{@klass.table_name} INNER JOIN #{@temp_table_name} ON #{@klass.table_name}.id = #{@temp_table_name}.id SET #{set_fields}")
      ActiveRecord::Base.connection.execute("DROP TABLE #{@temp_table_name}")
    end
    File.delete(@update_sql_file)
  end

  private
    def get_sql_row(record)
      if record.new_record?
        result = record.attributes.except(ID_STR).values
        fields = @insert_fields
      else
        result = record.attributes.values
        fields = @update_fields
        @records_need_update = true
      end
      # Convert Ruby values into the textual format mysqlimport expects.
      result.each_with_index do |item, index|
        if item.is_a?(Date) || item.is_a?(Time)
          result[index] = item.to_s(:db)
        elsif item == true || item == false
          result[index] = item ? 1 : 0
        elsif item.nil?
          if fields[index] == CREATED_AT_STR || fields[index] == UPDATED_AT_STR
            result[index] = @current_time_in_db_format
          else
            result[index] = NULL_STR
          end
        end
      end
      result.join(COMMA_STR)
    end
end

For example, assume we are inserting a large number of User records:
 
sql_file_dir = "path/to/some/dir"
sql_writer = SqlWriter.new(User, sql_file_dir)
alot_of_data.each do |data|
  #......
  user = User.new(user_attributes)
  sql_writer.write_record_to_sql_file(user)
end
sql_writer.insert_records_to_database 
And you will have your data inserted into the DB!