Edit 2016-06-15: I found a much simpler solution, and am a little red-faced for not realizing how easy this should have been from the start.

from django.core.cache.backends.base import BaseCache
from django.core.cache.backends.locmem import LocMemCache
from django.utils.synch import RWLock


class RequestCache(LocMemCache):
    """
    RequestCache is a customized LocMemCache which stores its data cache as an instance attribute, rather than
    a global. It's designed to live only as long as the request object that RequestCacheMiddleware attaches it to.
    """

    def __init__(self):
        # We explicitly do not call super() here, because while we want BaseCache.__init__() to run, we *don't*
        # want LocMemCache.__init__() to run, because that would store our caches in its globals.
        BaseCache.__init__(self, {})

        self._cache = {}
        self._expire_info = {}
        self._lock = RWLock()

class RequestCacheMiddleware(object):
    """
    Creates a fresh cache instance as request.cache. The cache instance lives only as long as request does.
    """

    def process_request(self, request):
        request.cache = RequestCache()
With this, you can use request.cache as a cache instance that lives exactly as long as request itself does, and is fully cleaned up by the garbage collector when the request is done.
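
A minimal usage sketch (the view, template, and model names are illustrative, not from the original answer):

from django.shortcuts import render

from myapp.models import Book  # hypothetical model


def my_view(request):
    # request.cache behaves like any other Django cache backend
    books = request.cache.get('books')
    if books is None:
        books = list(Book.objects.all())
        request.cache.set('books', books)
    return render(request, 'books.html', {'books': books})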


If you need access to the request object from a context where it is not normally available, you can use one of the various implementations of a so-called "global request middleware" that can be found online.
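
For example, a minimal sketch of such a middleware (all names here are illustrative, not from any particular package):

import threading

_local = threading.local()


class GlobalRequestMiddleware(object):
    """Stores the current request in thread-local storage, so that
    get_current_request() works from code that has no request argument."""

    def process_request(self, request):
        _local.request = request

    def process_response(self, request, response):
        _local.request = None  # don't leak the request across pooled threads
        return response


def get_current_request():
    return getattr(_local, 'request', None)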

Comments from other users:

- The question was about per-request caching, but this solution also serves cached data to a user's second request. Most of the time, plain cache.get() and cache.set() may be the better fit.
- The original question asks for a per-request cache, not a per-thread cache. In a server that uses a thread pool, a per-thread implementation never expires entries and can exhaust memory.
- Be careful with this solution! As more and more threads are spawned to serve your users, the per-thread cache dictionary keeps filling up and is never cleared. Depending on how your web server stores Python globals, this can amount to a memory leak.
- Yes - clear the cache in process_response and process_exception; there is a good example of this in the django-cuser middleware plugin.
- At first glance this seems cool, but look closer: the cache is only invalidated when a non-SELECT statement runs. That seems reasonable until you have multiple processes, because a write in one process does not invalidate the caches of the others.
- A small change would be to store the cache on the request object, so that it resets between requests.
- Could you put the 2016 solution first?
- This answer has been packaged up.
- "django.core.cache.backends.locmem defines several global dictionaries that hold references to every LocMemCache instance's cache data, and those dictionaries are never emptied." If that is true, isn't that a memory leak in Django's LocMemCache itself?
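
To install the original (uuid-based) version of the middleware, shown in full at the end of this answer, add it to MIDDLEWARE_CLASSES and fetch the per-request cache through the middleware: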
MIDDLEWARE_CLASSES = (
    ...
    'myapp.request_cache.RequestCacheMiddleware',
)

from myapp.request_cache import RequestCacheMiddleware

cache = RequestCacheMiddleware.get_cache()
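
get_cache() returns None when the middleware is not installed or no request is active, so guard accordingly:

cache = RequestCacheMiddleware.get_cache()
if cache is not None:
    cache.set('greeting', 'hello')
    print(cache.get('greeting'))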
# Cache directly on the request object. (The original snippet used
# request.get_host.__dict__, but a bound method's __dict__ is the underlying
# function's __dict__, which is shared by *all* requests, so values stored
# there would leak across requests.)
mycache = request.__dict__.setdefault('_my_cache', {})

# check whether we already have our value cached, and return it if so
# (an explicit membership test, so cached falsy values are also honoured)
if 'c_category' in mycache:
    return mycache['c_category']

# get some object from the database (a category object in this case)
c = Category.objects.get(id=cid)

# cache the database object under a new key on the request object
mycache['c_category'] = c
return c
from threading import local
import itertools

from django.db.models.sql.compiler import SQLCompiler
from django.db.models.sql.constants import MULTI, GET_ITERATOR_CHUNK_SIZE
from django.db.models.sql.datastructures import EmptyResultSet


_thread_locals = local()


def get_sql(compiler):
    ''' get a tuple of the SQL query and the arguments '''
    try:
        return compiler.as_sql()
    except EmptyResultSet:
        pass
    return ('', [])


def execute_sql_cache(self, result_type=MULTI):

    if hasattr(_thread_locals, 'query_cache'):

        sql = get_sql(self)  # ('SELECT * FROM ...', (50,)) <= sql string, args tuple
        if sql[0][:6].upper() == 'SELECT':

            # use the tuple of sql + args as the cache key (coerce the args to
            # a tuple in case as_sql() returned a list, which is unhashable)
            key = (sql[0], tuple(sql[1]))
            if key in _thread_locals.query_cache:
                return _thread_locals.query_cache[key]

            result = self._execute_sql(result_type)
            if hasattr(result, 'next'):

                # only cache if this is not a full first chunk of a chunked
                # result set (a full first chunk means more rows may follow)
                peek = result.next()
                result = list(itertools.chain([peek], result))

                if len(peek) == GET_ITERATOR_CHUNK_SIZE:
                    return result

            _thread_locals.query_cache[key] = result

            return result

        else:
            # the database has been updated; throw away the cache
            _thread_locals.query_cache = {}

    return self._execute_sql(result_type)


def patch():
    ''' Patch the Django query runner to use our own method to execute SQL.

    Call this once per request (e.g. from a middleware): it resets the
    calling thread's cache each time, and swaps in execute_sql_cache only
    on the first call.
    '''
    _thread_locals.query_cache = {}
    if not hasattr(SQLCompiler, '_execute_sql'):
        SQLCompiler._execute_sql = SQLCompiler.execute_sql
        SQLCompiler.execute_sql = execute_sql_cache
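
A minimal sketch of enabling the patch per request (the middleware name is hypothetical, not part of the original answer):

class QueryCacheMiddleware(object):
    def process_request(self, request):
        # resets this thread's query cache and, on first call, swaps the
        # patched execute_sql into SQLCompiler
        patch()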
from threading import current_thread
import weakref

_request_cache = weakref.WeakKeyDictionary()

def get_request_cache():
    # Note: the keys are thread objects, and threads in a pool are reused,
    # so this is really a per-thread cache; entries live until the thread dies.
    return _request_cache.setdefault(current_thread(), {})
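
A usage sketch (load_user_settings is a hypothetical expensive call):

cache = get_request_cache()
if 'user_settings' not in cache:
    cache['user_settings'] = load_user_settings()
user_settings = cache['user_settings']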
import time

from django.utils.lru_cache import lru_cache


def cached_call(func, *args, **kwargs):
    """Very basic temporary cache: results are cached for 1.5 sec on
    average, and never more than 3 sec."""
    # int(time.time() / 3) increments every 3 seconds, so it acts as a bucket
    # key: calls within the same 3-second window share the cached result
    return _cached_call(int(time.time() / 3), func, *args, **kwargs)


@lru_cache(maxsize=100)
def _cached_call(time_bucket, func, *args, **kwargs):
    return func(*args, **kwargs)
favourites = cached_call(get_favourites, request.user)
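For reference, here is my original answer, which the 2016 edit above supersedes: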
from uuid import uuid4
from threading import current_thread

from django.core.cache.backends.base import BaseCache
from django.core.cache.backends.locmem import LocMemCache
from django.utils.synch import RWLock


# Global in-memory store of cache data. Keyed by name, to provide multiple
# named local memory caches.
_caches = {}
_expire_info = {}
_locks = {}


class RequestCache(LocMemCache):
    """
    RequestCache is a customized LocMemCache with a destructor, ensuring that creating
    and destroying RequestCache objects over and over doesn't leak memory.
    """

    def __init__(self):
        # We explicitly do not call super() here, because while we want
        # BaseCache.__init__() to run, we *don't* want LocMemCache.__init__() to run.
        BaseCache.__init__(self, {})

        # Use a name that is guaranteed to be unique for each RequestCache instance.
        # This ensures that it will always be safe to call del _caches[self.name] in
        # the destructor, even when multiple threads are doing so at the same time.
        self.name = uuid4()
        self._cache = _caches.setdefault(self.name, {})
        self._expire_info = _expire_info.setdefault(self.name, {})
        self._lock = _locks.setdefault(self.name, RWLock())

    def __del__(self):
        del _caches[self.name]
        del _expire_info[self.name]
        del _locks[self.name]


class RequestCacheMiddleware(object):
    """
    Creates a cache instance that persists only for the duration of the current request.
    """

    _request_caches = {}

    def process_request(self, request):
        # The RequestCache object is keyed on the current thread because each request is
        # processed on a single thread, allowing us to retrieve the correct RequestCache
        # object in the other functions.
        self._request_caches[current_thread()] = RequestCache()

    def process_response(self, request, response):
        self.delete_cache()
        return response

    def process_exception(self, request, exception):
        self.delete_cache()

    @classmethod
    def get_cache(cls):
        """
        Retrieve the current request's cache.

        Returns None if RequestCacheMiddleware is not currently installed via 
        MIDDLEWARE_CLASSES, or if there is no active request.
        """
        return cls._request_caches.get(current_thread())

    @classmethod
    def clear_cache(cls):
        """
        Clear the current request's cache.
        """
        cache = cls.get_cache()
        if cache:
            cache.clear()

    @classmethod
    def delete_cache(cls):
        """
        Delete the current request's cache object to avoid leaking memory.
        """
        cache = cls._request_caches.pop(current_thread(), None)
        del cache
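
To install it, add 'myapp.request_cache.RequestCacheMiddleware' to MIDDLEWARE_CLASSES as shown earlier, and fetch the per-request cache anywhere with RequestCacheMiddleware.get_cache().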