diff --git a/Makefile b/Makefile
index d2051060c1..1f6dee240f 100644
--- a/Makefile
+++ b/Makefile
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-VERSION?=7.2.5
+VERSION?=7.4.0
PROJECT?=redis
GH_ORG?=redis
SPRING_PROFILE?=ci
diff --git a/pom.xml b/pom.xml
index fd73e23ebd..1f6369e3b6 100644
--- a/pom.xml
+++ b/pom.xml
@@ -5,7 +5,7 @@
<groupId>org.springframework.data</groupId>
<artifactId>spring-data-redis</artifactId>
- <version>3.5.0-SNAPSHOT</version>
+ <version>3.5.x-GH-3054-SNAPSHOT</version>
<name>Spring Data Redis</name>
<description>Spring Data module for Redis</description>
diff --git a/src/main/antora/modules/ROOT/pages/appendix.adoc b/src/main/antora/modules/ROOT/pages/appendix.adoc
index 46feff0611..669bf82204 100644
--- a/src/main/antora/modules/ROOT/pages/appendix.adoc
+++ b/src/main/antora/modules/ROOT/pages/appendix.adoc
@@ -8,193 +8,201 @@ link:https://www.springframework.org/schema/redis/spring-redis-1.0.xsd[Spring Da
[[supported-commands]]
== Supported Commands
+
.Redis commands supported by `RedisTemplate`
[width="50%",cols="<2,^1",options="header"]
|=========================================================
|Command |Template Support
-|APPEND |X
-|AUTH |X
-|BGREWRITEAOF |X
-|BGSAVE |X
-|BITCOUNT |X
-|BITFIELD |X
-|BITOP |X
-|BLPOP |X
-|BRPOP |X
-|BRPOPLPUSH |X
-|CLIENT KILL |X
-|CLIENT GETNAME |X
-|CLIENT LIST |X
-|CLIENT SETNAME |X
-|CLUSTER SLOTS |-
-|COMMAND |-
-|COMMAND COUNT |-
-|COMMAND GETKEYS |-
-|COMMAND INFO |-
-|CONFIG GET |X
-|CONFIG RESETSTAT |X
-|CONFIG REWRITE |-
-|CONFIG SET |X
-|DBSIZE |X
-|DEBUG OBJECT |-
-|DEBUG SEGFAULT |-
-|DECR |X
-|DECRBY |X
-|DEL |X
-|DISCARD |X
-|DUMP |X
-|ECHO |X
-|EVAL |X
-|EVALSHA |X
-|EXEC |X
-|EXISTS |X
-|EXPIRE |X
-|EXPIREAT |X
-|FLUSHALL |X
-|FLUSHDB |X
-|GEOADD |X
-|GEODIST |X
-|GEOHASH |X
-|GEOPOS |X
-|GEORADIUS |X
-|GEORADIUSBYMEMBER |X
-|GEOSEARCH |X
-|GEOSEARCHSTORE |X
-|GET |X
-|GETBIT |X
-|GETRANGE |X
-|GETSET |X
-|HDEL |X
-|HEXISTS |X
-|HGET |X
-|HGETALL |X
-|HINCRBY |X
-|HINCRBYFLOAT |X
-|HKEYS |X
-|HLEN |X
-|HMGET |X
-|HMSET |X
-|HSCAN |X
-|HSET |X
-|HSETNX |X
-|HVALS |X
-|INCR |X
-|INCRBY |X
-|INCRBYFLOAT |X
-|INFO |X
-|KEYS |X
-|LASTSAVE |X
-|LINDEX |X
-|LINSERT |X
-|LLEN |X
-|LPOP |X
-|LPUSH |X
-|LPUSHX |X
-|LRANGE |X
-|LREM |X
-|LSET |X
-|LTRIM |X
-|MGET |X
-|MIGRATE |-
-|MONITOR |-
-|MOVE |X
-|MSET |X
-|MSETNX |X
-|MULTI |X
-|OBJECT |-
-|PERSIST |X
-|PEXIPRE |X
-|PEXPIREAT |X
-|PFADD |X
-|PFCOUNT |X
-|PFMERGE |X
-|PING |X
-|PSETEX |X
-|PSUBSCRIBE |X
-|PTTL |X
-|PUBLISH |X
-|PUBSUB |-
-|PUBSUBSCRIBE |-
-|QUIT |X
-|RANDOMKEY |X
-|RENAME |X
-|RENAMENX |X
-|REPLICAOF |X
-|RESTORE |X
-|ROLE |-
-|RPOP |X
-|RPOPLPUSH |X
-|RPUSH |X
-|RPUSHX |X
-|SADD |X
-|SAVE |X
-|SCAN |X
-|SCARD |X
-|SCRIPT EXITS |X
-|SCRIPT FLUSH |X
-|SCRIPT KILL |X
-|SCRIPT LOAD |X
-|SDIFF |X
-|SDIFFSTORE |X
-|SELECT |X
-|SENTINEL FAILOVER |X
+|APPEND |X
+|AUTH |X
+|BGREWRITEAOF |X
+|BGSAVE |X
+|BITCOUNT |X
+|BITFIELD |X
+|BITOP |X
+|BLPOP |X
+|BRPOP |X
+|BRPOPLPUSH |X
+|CLIENT KILL |X
+|CLIENT GETNAME |X
+|CLIENT LIST |X
+|CLIENT SETNAME |X
+|CLUSTER SLOTS |-
+|COMMAND |-
+|COMMAND COUNT |-
+|COMMAND GETKEYS |-
+|COMMAND INFO |-
+|CONFIG GET |X
+|CONFIG RESETSTAT |X
+|CONFIG REWRITE |-
+|CONFIG SET |X
+|DBSIZE |X
+|DEBUG OBJECT |-
+|DEBUG SEGFAULT |-
+|DECR |X
+|DECRBY |X
+|DEL |X
+|DISCARD |X
+|DUMP |X
+|ECHO |X
+|EVAL |X
+|EVALSHA |X
+|EXEC |X
+|EXISTS |X
+|EXPIRE |X
+|EXPIREAT |X
+|FLUSHALL |X
+|FLUSHDB |X
+|GEOADD |X
+|GEODIST |X
+|GEOHASH |X
+|GEOPOS |X
+|GEORADIUS |X
+|GEORADIUSBYMEMBER |X
+|GEOSEARCH |X
+|GEOSEARCHSTORE |X
+|GET |X
+|GETBIT |X
+|GETRANGE |X
+|GETSET |X
+|HDEL |X
+|HEXISTS |X
+|HEXPIRE |X
+|HEXPIREAT |X
+|HPEXPIRE |X
+|HPEXPIREAT |X
+|HPERSIST |X
+|HTTL |X
+|HPTTL |X
+|HGET |X
+|HGETALL |X
+|HINCRBY |X
+|HINCRBYFLOAT |X
+|HKEYS |X
+|HLEN |X
+|HMGET |X
+|HMSET |X
+|HSCAN |X
+|HSET |X
+|HSETNX |X
+|HVALS |X
+|INCR |X
+|INCRBY |X
+|INCRBYFLOAT |X
+|INFO |X
+|KEYS |X
+|LASTSAVE |X
+|LINDEX |X
+|LINSERT |X
+|LLEN |X
+|LPOP |X
+|LPUSH |X
+|LPUSHX |X
+|LRANGE |X
+|LREM |X
+|LSET |X
+|LTRIM |X
+|MGET |X
+|MIGRATE |-
+|MONITOR |-
+|MOVE |X
+|MSET |X
+|MSETNX |X
+|MULTI |X
+|OBJECT |-
+|PERSIST |X
+|PEXPIRE |X
+|PEXPIREAT |X
+|PFADD |X
+|PFCOUNT |X
+|PFMERGE |X
+|PING |X
+|PSETEX |X
+|PSUBSCRIBE |X
+|PTTL |X
+|PUBLISH |X
+|PUBSUB |-
+|PUNSUBSCRIBE |-
+|QUIT |X
+|RANDOMKEY |X
+|RENAME |X
+|RENAMENX |X
+|REPLICAOF |X
+|RESTORE |X
+|ROLE |-
+|RPOP |X
+|RPOPLPUSH |X
+|RPUSH |X
+|RPUSHX |X
+|SADD |X
+|SAVE |X
+|SCAN |X
+|SCARD |X
+|SCRIPT EXISTS |X
+|SCRIPT FLUSH |X
+|SCRIPT KILL |X
+|SCRIPT LOAD |X
+|SDIFF |X
+|SDIFFSTORE |X
+|SELECT |X
+|SENTINEL FAILOVER |X
|SENTINEL GET-MASTER-ADD-BY-NAME |-
-|SENTINEL MASTER | -
-|SENTINEL MASTERS |X
-|SENTINEL MONITOR |X
-|SENTINEL REMOVE |X
-|SENTINEL RESET |-
-|SENTINEL SET |-
-|SENTINEL SLAVES |X
-|SET |X
-|SETBIT |X
-|SETEX |X
-|SETNX |X
-|SETRANGE |X
-|SHUTDOWN |X
-|SINTER |X
-|SINTERSTORE |X
-|SISMEMBER |X
-|SLAVEOF |X
-|SLOWLOG |-
-|SMEMBERS |X
-|SMOVE |X
-|SORT |X
-|SPOP |X
-|SRANDMEMBER |X
-|SREM |X
-|SSCAN |X
-|STRLEN |X
-|SUBSCRIBE |X
-|SUNION |X
-|SUNIONSTORE |X
-|SYNC |-
-|TIME |X
-|TTL |X
-|TYPE |X
-|UNSUBSCRIBE |X
-|UNWATCH |X
-|WATCH |X
-|ZADD |X
-|ZCARD |X
-|ZCOUNT |X
-|ZINCRBY |X
-|ZINTERSTORE |X
-|ZLEXCOUNT |-
-|ZRANGE |X
-|ZRANGEBYLEX |-
-|ZREVRANGEBYLEX |-
-|ZRANGEBYSCORE |X
-|ZRANGESTORE |X
-|ZRANK |X
-|ZREM |X
-|ZREMRANGEBYLEX |-
-|ZREMRANGEBYRANK |X
-|ZREVRANGE |X
-|ZREVRANGEBYSCORE |X
-|ZREVRANK |X
-|ZSCAN |X
-|ZSCORE |X
-|ZUNINONSTORE |X
+|SENTINEL MASTER | -
+|SENTINEL MASTERS |X
+|SENTINEL MONITOR |X
+|SENTINEL REMOVE |X
+|SENTINEL RESET |-
+|SENTINEL SET |-
+|SENTINEL SLAVES |X
+|SET |X
+|SETBIT |X
+|SETEX |X
+|SETNX |X
+|SETRANGE |X
+|SHUTDOWN |X
+|SINTER |X
+|SINTERSTORE |X
+|SISMEMBER |X
+|SLAVEOF |X
+|SLOWLOG |-
+|SMEMBERS |X
+|SMOVE |X
+|SORT |X
+|SPOP |X
+|SRANDMEMBER |X
+|SREM |X
+|SSCAN |X
+|STRLEN |X
+|SUBSCRIBE |X
+|SUNION |X
+|SUNIONSTORE |X
+|SYNC |-
+|TIME |X
+|TTL |X
+|TYPE |X
+|UNSUBSCRIBE |X
+|UNWATCH |X
+|WATCH |X
+|ZADD |X
+|ZCARD |X
+|ZCOUNT |X
+|ZINCRBY |X
+|ZINTERSTORE |X
+|ZLEXCOUNT |-
+|ZRANGE |X
+|ZRANGEBYLEX |-
+|ZREVRANGEBYLEX |-
+|ZRANGEBYSCORE |X
+|ZRANGESTORE |X
+|ZRANK |X
+|ZREM |X
+|ZREMRANGEBYLEX |-
+|ZREMRANGEBYRANK |X
+|ZREVRANGE |X
+|ZREVRANGEBYSCORE |X
+|ZREVRANK |X
+|ZSCAN |X
+|ZSCORE |X
+|ZUNIONSTORE |X
|=========================================================
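The rows added above (HEXPIRE, HEXPIREAT, HPEXPIRE, HPEXPIREAT, HPERSIST, HTTL, HPTTL) correspond to the per-field TTL commands introduced in Redis 7.4. As a rough orientation, the following is a minimal sketch of how they surface through the connection-level API added later in this patch; the Lettuce factory, host/port and key/field names are illustrative assumptions, and a Redis 7.4+ server is required.

    import java.nio.charset.StandardCharsets;
    import java.util.List;

    import org.springframework.data.redis.connection.RedisConnection;
    import org.springframework.data.redis.connection.RedisStandaloneConfiguration;
    import org.springframework.data.redis.connection.lettuce.LettuceConnectionFactory;

    class HashFieldTtlSketch {

        public static void main(String[] args) throws Exception {

            // Assumption: a local Redis 7.4+ instance; hash field TTL is not available before 7.4.
            LettuceConnectionFactory factory = new LettuceConnectionFactory(
                    new RedisStandaloneConfiguration("localhost", 6379));
            factory.afterPropertiesSet();
            factory.start();

            byte[] key = "session:42".getBytes(StandardCharsets.UTF_8);
            byte[] field = "token".getBytes(StandardCharsets.UTF_8);

            RedisConnection connection = factory.getConnection();
            try {
                connection.hashCommands().hSet(key, field, "value".getBytes(StandardCharsets.UTF_8));

                // HEXPIRE: per-field TTL of 60 seconds; 1 per field means the TTL was set.
                List<Long> applied = connection.hashCommands().hExpire(key, 60, field);

                // HTTL: remaining TTL in seconds; -1 means no TTL, -2 means no such field.
                List<Long> ttl = connection.hashCommands().hTtl(key, field);

                // HPERSIST: remove the per-field TTL again.
                List<Long> persisted = connection.hashCommands().hPersist(key, field);
            } finally {
                connection.close();
            }

            factory.destroy();
        }
    }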
diff --git a/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java b/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java
index 8fe2f2c9f7..17dc41378d 100644
--- a/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java
+++ b/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java
@@ -23,6 +23,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+
import org.springframework.core.convert.converter.Converter;
import org.springframework.data.geo.Circle;
import org.springframework.data.geo.Distance;
@@ -30,14 +31,24 @@
import org.springframework.data.geo.Metric;
import org.springframework.data.geo.Point;
import org.springframework.data.redis.RedisSystemException;
+import org.springframework.data.redis.connection.Hash.FieldExpirationOptions;
import org.springframework.data.redis.connection.convert.Converters;
import org.springframework.data.redis.connection.convert.ListConverter;
import org.springframework.data.redis.connection.convert.MapConverter;
import org.springframework.data.redis.connection.convert.SetConverter;
-import org.springframework.data.redis.connection.stream.*;
+import org.springframework.data.redis.connection.stream.ByteRecord;
+import org.springframework.data.redis.connection.stream.Consumer;
+import org.springframework.data.redis.connection.stream.MapRecord;
+import org.springframework.data.redis.connection.stream.PendingMessages;
+import org.springframework.data.redis.connection.stream.PendingMessagesSummary;
+import org.springframework.data.redis.connection.stream.ReadOffset;
+import org.springframework.data.redis.connection.stream.RecordId;
import org.springframework.data.redis.connection.stream.StreamInfo.XInfoConsumers;
import org.springframework.data.redis.connection.stream.StreamInfo.XInfoGroups;
import org.springframework.data.redis.connection.stream.StreamInfo.XInfoStream;
+import org.springframework.data.redis.connection.stream.StreamOffset;
+import org.springframework.data.redis.connection.stream.StreamReadOptions;
+import org.springframework.data.redis.connection.stream.StringRecord;
import org.springframework.data.redis.connection.zset.Aggregate;
import org.springframework.data.redis.connection.zset.DefaultTuple;
import org.springframework.data.redis.connection.zset.Tuple;
@@ -2560,12 +2571,105 @@ public Cursor<Map.Entry<byte[], byte[]>> hScan(byte[] key, ScanOptions options) {
return this.delegate.hScan(key, options);
}
- @Nullable
@Override
public Long hStrLen(byte[] key, byte[] field) {
return convertAndReturn(delegate.hStrLen(key, field), Converters.identityConverter());
}
+ public @Nullable List<Long> applyExpiration(byte[] key,
+ org.springframework.data.redis.core.types.Expiration expiration,
+ FieldExpirationOptions options, byte[]... fields) {
+ return this.delegate.applyExpiration(key, expiration, options, fields);
+ }
+
+ @Override
+ public List<Long> hExpire(byte[] key, long seconds, FieldExpirationOptions.Condition condition, byte[]... fields) {
+ return this.delegate.hExpire(key, seconds, condition, fields);
+ }
+
+ @Override
+ public List<Long> hpExpire(byte[] key, long millis, FieldExpirationOptions.Condition condition, byte[]... fields) {
+ return this.delegate.hpExpire(key, millis, condition, fields);
+ }
+
+ @Override
+ public List<Long> hExpireAt(byte[] key, long unixTime, FieldExpirationOptions.Condition condition, byte[]... fields) {
+ return this.delegate.hExpireAt(key, unixTime, condition, fields);
+ }
+
+ @Override
+ public List<Long> hpExpireAt(byte[] key, long unixTimeInMillis, FieldExpirationOptions.Condition condition,
+ byte[]... fields) {
+ return this.delegate.hpExpireAt(key, unixTimeInMillis, condition, fields);
+ }
+
+ @Override
+ public List<Long> hPersist(byte[] key, byte[]... fields) {
+ return this.delegate.hPersist(key, fields);
+ }
+
+ @Override
+ public List<Long> hTtl(byte[] key, byte[]... fields) {
+ return this.delegate.hTtl(key, fields);
+ }
+
+ @Override
+ public List<Long> hpTtl(byte[] key, byte[]... fields) {
+ return this.delegate.hpTtl(key, fields);
+ }
+
+ @Override
+ public List<Long> hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) {
+ return this.delegate.hTtl(key, timeUnit, fields);
+ }
+
+ public @Nullable List<Long> applyExpiration(String key,
+ org.springframework.data.redis.core.types.Expiration expiration,
+ FieldExpirationOptions options, String... fields) {
+ return applyExpiration(serialize(key), expiration, options, serializeMulti(fields));
+ }
+
+ @Override
+ public List<Long> hExpire(String key, long seconds, FieldExpirationOptions.Condition condition, String... fields) {
+ return hExpire(serialize(key), seconds, condition, serializeMulti(fields));
+ }
+
+ @Override
+ public List<Long> hpExpire(String key, long millis, FieldExpirationOptions.Condition condition, String... fields) {
+ return hpExpire(serialize(key), millis, condition, serializeMulti(fields));
+ }
+
+ @Override
+ public List<Long> hExpireAt(String key, long unixTime, FieldExpirationOptions.Condition condition, String... fields) {
+ return hExpireAt(serialize(key), unixTime, condition, serializeMulti(fields));
+ }
+
+ @Override
+ public List<Long> hpExpireAt(String key, long unixTimeInMillis, FieldExpirationOptions.Condition condition,
+ String... fields) {
+ return hpExpireAt(serialize(key), unixTimeInMillis, condition, serializeMulti(fields));
+ }
+
+ @Override
+ public List<Long> hPersist(String key, String... fields) {
+ return hPersist(serialize(key), serializeMulti(fields));
+ }
+
+ @Override
+ public List<Long> hTtl(String key, String... fields) {
+ return hTtl(serialize(key), serializeMulti(fields));
+ }
+
+ @Override
+ public List<Long> hTtl(String key, TimeUnit timeUnit, String... fields) {
+ return hTtl(serialize(key), timeUnit, serializeMulti(fields));
+ }
+
+ @Override
+ public List<Long> hpTtl(String key, String... fields) {
+ return hpTtl(serialize(key), serializeMulti(fields));
+ }
+
@Override
public void setClientName(byte[] name) {
this.delegate.setClientName(name);
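The String overloads above only serialize the key and the field names and hand off to the byte[] delegate, so StringRedisConnection callers get the same per-field TTL behaviour without dealing with serializers themselves. A minimal sketch of that usage, assuming the wrapped binary connection comes from whatever factory the application already uses (key and field values are made up):

    import java.util.List;

    import org.springframework.data.redis.connection.DefaultStringRedisConnection;
    import org.springframework.data.redis.connection.Hash.FieldExpirationOptions;
    import org.springframework.data.redis.connection.RedisConnection;
    import org.springframework.data.redis.connection.StringRedisConnection;

    class StringHashTtlSketch {

        List<Long> expireToken(RedisConnection binaryConnection) {

            // Wraps the byte[]-based connection; String keys and fields are serialized (UTF-8 by
            // default) and delegated to the hExpire(byte[], ...) overloads shown in the hunk above.
            StringRedisConnection connection = new DefaultStringRedisConnection(binaryConnection);

            connection.hSet("session:42", "token", "abc");
            return connection.hExpire("session:42", 60, FieldExpirationOptions.Condition.ALWAYS, "token");
        }
    }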
diff --git a/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java b/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java
index aa5f6de773..460d883b41 100644
--- a/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java
+++ b/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java
@@ -28,6 +28,7 @@
import org.springframework.data.geo.GeoResults;
import org.springframework.data.geo.Metric;
import org.springframework.data.geo.Point;
+import org.springframework.data.redis.connection.Hash.FieldExpirationOptions;
import org.springframework.data.redis.connection.stream.ByteRecord;
import org.springframework.data.redis.connection.stream.Consumer;
import org.springframework.data.redis.connection.stream.MapRecord;
@@ -65,6 +66,7 @@
* @author ihaohong
* @author Dennis Neufeld
* @author Shyngys Sapraliyev
+ * @author Tihomir Mateev
* @since 2.0
*/
@Deprecated
@@ -1470,6 +1472,101 @@ default Long hStrLen(byte[] key, byte[] field) {
return hashCommands().hStrLen(key, field);
}
+ /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */
+ @Override
+ @Deprecated
+ default List<Long> hExpire(byte[] key, long seconds, byte[]... fields) {
+ return hashCommands().hExpire(key, seconds, FieldExpirationOptions.Condition.ALWAYS, fields);
+ }
+
+ /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */
+ @Override
+ @Deprecated
+ default List<Long> hExpire(byte[] key, long seconds, FieldExpirationOptions.Condition condition, byte[]... fields) {
+ return hashCommands().hExpire(key, seconds, condition, fields);
+ }
+
+ /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */
+ @Override
+ @Deprecated
+ default List<Long> hpExpire(byte[] key, long millis, byte[]... fields) {
+ return hashCommands().hpExpire(key, millis, FieldExpirationOptions.Condition.ALWAYS, fields);
+ }
+
+ /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */
+ @Override
+ @Deprecated
+ default List<Long> hpExpire(byte[] key, long millis, FieldExpirationOptions.Condition condition, byte[]... fields) {
+ return hashCommands().hpExpire(key, millis, condition, fields);
+ }
+
+ /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */
+ @Override
+ @Deprecated
+ default List<Long> hExpireAt(byte[] key, long unixTime, byte[]... fields) {
+ return hashCommands().hExpireAt(key, unixTime, FieldExpirationOptions.Condition.ALWAYS, fields);
+ }
+
+ /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */
+ @Override
+ @Deprecated
+ default List<Long> hExpireAt(byte[] key, long unixTime, FieldExpirationOptions.Condition condition,
+ byte[]... fields) {
+ return hashCommands().hExpireAt(key, unixTime, condition, fields);
+ }
+
+ /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */
+ @Override
+ @Deprecated
+ default List<Long> hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... fields) {
+ return hashCommands().hpExpireAt(key, unixTimeInMillis, FieldExpirationOptions.Condition.ALWAYS, fields);
+ }
+
+ /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */
+ @Override
+ @Deprecated
+ default List<Long> hpExpireAt(byte[] key, long unixTimeInMillis, FieldExpirationOptions.Condition condition,
+ byte[]... fields) {
+ return hashCommands().hpExpireAt(key, unixTimeInMillis, condition, fields);
+ }
+
+ /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */
+ @Override
+ @Deprecated
+ default List<Long> hPersist(byte[] key, byte[]... fields) {
+ return hashCommands().hPersist(key, fields);
+ }
+
+ /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */
+ @Override
+ @Deprecated
+ default List<Long> hTtl(byte[] key, byte[]... fields) {
+ return hashCommands().hTtl(key, fields);
+ }
+
+ /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */
+ @Override
+ @Deprecated
+ default List<Long> hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) {
+ return hashCommands().hTtl(key, timeUnit, fields);
+ }
+
+ /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */
+ @Override
+ @Deprecated
+ default List<Long> hpTtl(byte[] key, byte[]... fields) {
+ return hashCommands().hpTtl(key, fields);
+ }
+
+ /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */
+ @Override
+ @Deprecated
+ default @Nullable List<Long> applyExpiration(byte[] key,
+ org.springframework.data.redis.core.types.Expiration expiration, FieldExpirationOptions options,
+ byte[]... fields) {
+ return hashCommands().applyExpiration(key, expiration, options, fields);
+ }
+
// GEO COMMANDS
/** @deprecated in favor of {@link RedisConnection#geoCommands()}}. */
@@ -1841,9 +1938,8 @@ default T evalSha(byte[] scriptSha, ReturnType returnType, int numKeys, byte
/** @deprecated in favor of {@link RedisConnection#zSetCommands()}}. */
@Override
@Deprecated
- default Long zRangeStoreByLex(byte[] dstKey, byte[] srcKey,
- org.springframework.data.domain.Range<byte[]> range,
- org.springframework.data.redis.connection.Limit limit) {
+ default Long zRangeStoreByLex(byte[] dstKey, byte[] srcKey, org.springframework.data.domain.Range<byte[]> range,
+ org.springframework.data.redis.connection.Limit limit) {
return zSetCommands().zRangeStoreByLex(dstKey, srcKey, range, limit);
}
@@ -1860,7 +1956,7 @@ default Long zRangeStoreRevByLex(byte[] dstKey, byte[] srcKey, org.springframewo
@Deprecated
default Long zRangeStoreByScore(byte[] dstKey, byte[] srcKey,
org.springframework.data.domain.Range<? extends Number> range,
- org.springframework.data.redis.connection.Limit limit) {
+ org.springframework.data.redis.connection.Limit limit) {
return zSetCommands().zRangeStoreByScore(dstKey, srcKey, range, limit);
}
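The deprecated defaults above keep the flat RedisConnection surface source-compatible; they only forward to hashCommands(). A short sketch of the two equivalent call paths (key and field bytes are placeholders):

    import java.util.List;

    import org.springframework.data.redis.connection.RedisConnection;

    class HashCommandsForwardingSketch {

        void expireBothWays(RedisConnection connection, byte[] key, byte[] field) {

            // Default inherited through the connection interface; DefaultedRedisConnection routes
            // this call to hashCommands().hExpire(...) and marks its override deprecated.
            List<Long> viaConnection = connection.hExpire(key, 60, field);

            // Preferred form: address the dedicated hash command interface explicitly.
            List<Long> viaHashCommands = connection.hashCommands().hExpire(key, 60, field);
        }
    }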
diff --git a/src/main/java/org/springframework/data/redis/connection/Hash.java b/src/main/java/org/springframework/data/redis/connection/Hash.java
new file mode 100644
index 0000000000..51e326dd2b
--- /dev/null
+++ b/src/main/java/org/springframework/data/redis/connection/Hash.java
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2025 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.springframework.data.redis.connection;
+
+import java.util.Objects;
+
+import org.springframework.lang.Contract;
+import org.springframework.util.ObjectUtils;
+
+/**
+ * Types for interacting with Hash data structures.
+ *
+ * @author Christoph Strobl
+ * @since 3.5
+ */
+public interface Hash {
+
+ /**
+ * Expiration options for Hash field expiration updates.
+ */
+ class FieldExpirationOptions {
+
+ private static final FieldExpirationOptions NONE = new FieldExpirationOptions(Condition.ALWAYS);
+ private final Condition condition;
+
+ FieldExpirationOptions(Condition condition) {
+ this.condition = condition;
+ }
+
+ public static FieldExpirationOptions none() {
+ return NONE;
+ }
+
+ public static FieldExpireOptionsBuilder builder() {
+ return new FieldExpireOptionsBuilder();
+ }
+
+ public Condition getCondition() {
+ return condition;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (o == this) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ FieldExpirationOptions that = (FieldExpirationOptions) o;
+ return ObjectUtils.nullSafeEquals(this.condition, that.condition);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(condition);
+ }
+
+ public static class FieldExpireOptionsBuilder {
+
+ private Condition condition = Condition.ALWAYS;
+
+ @Contract("-> this")
+ public FieldExpireOptionsBuilder nx() {
+ this.condition = Condition.NX;
+ return this;
+ }
+
+ @Contract("-> this")
+ public FieldExpireOptionsBuilder xx() {
+ this.condition = Condition.XX;
+ return this;
+ }
+
+ @Contract("-> this")
+ public FieldExpireOptionsBuilder gt() {
+ this.condition = Condition.GT;
+ return this;
+ }
+
+ @Contract("-> this")
+ public FieldExpireOptionsBuilder lt() {
+ this.condition = Condition.LT;
+ return this;
+ }
+
+ public FieldExpirationOptions build() {
+ return condition == Condition.ALWAYS ? NONE : new FieldExpirationOptions(condition);
+ }
+
+ }
+
+ public enum Condition {
+
+ /**
+ * Always apply expiration.
+ */
+ ALWAYS,
+
+ /**
+ * Set expiration only when the field has no expiration.
+ */
+ NX,
+
+ /**
+ * Set expiration only when the field has an existing expiration.
+ */
+ XX,
+
+ /**
+ * Set expiration only when the new expiration is greater than current one.
+ */
+ GT,
+
+ /**
+ * Set expiration only when the new expiration is less than the current one.
+ */
+ LT
+
+ }
+
+ }
+
+}
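FieldExpirationOptions is a small value type plus builder around the NX/XX/GT/LT flags accepted by the field-expiration commands. A quick illustration of the intended builder usage (variable names are arbitrary):

    import org.springframework.data.redis.connection.Hash.FieldExpirationOptions;

    class FieldExpirationOptionsSketch {

        void buildOptions() {

            // Default: no condition, the expiration is always applied.
            FieldExpirationOptions always = FieldExpirationOptions.none();

            // NX: only set a TTL on fields that do not have one yet.
            FieldExpirationOptions onlyIfAbsent = FieldExpirationOptions.builder().nx().build();

            // GT: only replace an existing TTL with a longer one.
            FieldExpirationOptions onlyIfGreater = FieldExpirationOptions.builder().gt().build();

            // The chosen flag travels with the command via the condition.
            FieldExpirationOptions.Condition condition = onlyIfAbsent.getCondition(); // Condition.NX
        }
    }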
diff --git a/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java b/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java
index 0fae8d30b8..c463737747 100644
--- a/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java
+++ b/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java
@@ -19,15 +19,19 @@
import reactor.core.publisher.Mono;
import java.nio.ByteBuffer;
+import java.time.Duration;
+import java.time.Instant;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import org.reactivestreams.Publisher;
import org.springframework.dao.InvalidDataAccessApiUsageException;
+import org.springframework.data.redis.connection.Hash.FieldExpirationOptions;
import org.springframework.data.redis.connection.ReactiveRedisConnection.BooleanResponse;
import org.springframework.data.redis.connection.ReactiveRedisConnection.Command;
import org.springframework.data.redis.connection.ReactiveRedisConnection.CommandResponse;
@@ -36,6 +40,7 @@
import org.springframework.data.redis.connection.ReactiveRedisConnection.MultiValueResponse;
import org.springframework.data.redis.connection.ReactiveRedisConnection.NumericResponse;
import org.springframework.data.redis.core.ScanOptions;
+import org.springframework.data.redis.core.types.Expiration;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
@@ -44,10 +49,34 @@
*
* @author Christoph Strobl
* @author Mark Paluch
+ * @author Tihomir Mateev
* @since 2.0
*/
public interface ReactiveHashCommands {
+ /**
+ * {@link Command} for hash-bound operations.
+ *
+ * @author Christoph Strobl
+ * @author Tihomir Mateev
+ */
+ class HashFieldsCommand extends KeyCommand {
+
+ private final List<ByteBuffer> fields;
+
+ private HashFieldsCommand(@Nullable ByteBuffer key, List<ByteBuffer> fields) {
+ super(key);
+ this.fields = fields;
+ }
+
+ /**
+ * @return never {@literal null}.
+ */
+ public List<ByteBuffer> getFields() {
+ return fields;
+ }
+ }
+
/**
* {@literal HSET} {@link Command}.
*
@@ -216,15 +245,10 @@ default Mono hMSet(ByteBuffer key, Map fieldVal
* @author Christoph Strobl
* @see Redis Documentation: HGET
*/
- class HGetCommand extends KeyCommand {
-
- private List<ByteBuffer> fields;
+ class HGetCommand extends HashFieldsCommand {
private HGetCommand(@Nullable ByteBuffer key, List<ByteBuffer> fields) {
-
- super(key);
-
- this.fields = fields;
+ super(key, fields);
}
/**
@@ -263,14 +287,7 @@ public HGetCommand from(ByteBuffer key) {
Assert.notNull(key, "Key must not be null");
- return new HGetCommand(key, fields);
- }
-
- /**
- * @return never {@literal null}.
- */
- public List<ByteBuffer> getFields() {
- return fields;
+ return new HGetCommand(key, getFields());
}
}
@@ -394,15 +411,10 @@ default Mono<Boolean> hExists(ByteBuffer key, ByteBuffer field) {
* @author Christoph Strobl
* @see Redis Documentation: HDEL
*/
- class HDelCommand extends KeyCommand {
-
- private final List<ByteBuffer> fields;
+ class HDelCommand extends HashFieldsCommand {
private HDelCommand(@Nullable ByteBuffer key, List<ByteBuffer> fields) {
-
- super(key);
-
- this.fields = fields;
+ super(key, fields);
}
/**
@@ -441,14 +453,7 @@ public HDelCommand from(ByteBuffer key) {
Assert.notNull(key, "Key must not be null");
- return new HDelCommand(key, fields);
- }
-
- /**
- * @return never {@literal null}.
- */
- public List<ByteBuffer> getFields() {
- return fields;
+ return new HDelCommand(key, getFields());
}
}
@@ -842,4 +847,412 @@ default Mono<Long> hStrLen(ByteBuffer key, ByteBuffer field) {
* @since 2.1
*/
Flux<NumericResponse<HStrLenCommand, Long>> hStrLen(Publisher<HStrLenCommand> commands);
+
+ /**
+ * @since 3.5
+ */
+ class ExpireCommand extends HashFieldsCommand {
+
+ private final Expiration expiration;
+ private final FieldExpirationOptions options;
+
+ private ExpireCommand(@Nullable ByteBuffer key, List<ByteBuffer> fields, Expiration expiration,
+ FieldExpirationOptions options) {
+
+ super(key, fields);
+
+ this.expiration = expiration;
+ this.options = options;
+ }
+
+ /**
+ * Creates a new {@link ExpireCommand}.
+ *
+ * @param fields the {@code field} names to apply expiration to
+ * @param timeout the actual timeout
+ * @param unit the unit of measure for the {@code timeout}.
+ * @return new instance of {@link ExpireCommand}.
+ */
+ public static ExpireCommand expire(List<ByteBuffer> fields, long timeout, TimeUnit unit) {
+
+ Assert.notNull(fields, "Field must not be null");
+ return expire(fields, Expiration.from(timeout, unit));
+ }
+
+ /**
+ * Creates a new {@link ExpireCommand}.
+ *
+ * @param fields the {@code field} names to apply expiration to.
+ * @param ttl the actual timeout.
+ * @return new instance of {@link ExpireCommand}.
+ */
+ public static ExpireCommand expire(List<ByteBuffer> fields, Duration ttl) {
+
+ Assert.notNull(fields, "Field must not be null");
+ return expire(fields, Expiration.from(ttl));
+ }
+
+ /**
+ * Creates a new {@link ExpireCommand}.
+ *
+ * @param fields the {@code field} names to apply expiration to
+ * @param expiration the {@link Expiration} to apply to the given {@literal fields}.
+ * @return new instance of {@link ExpireCommand}.
+ */
+ public static ExpireCommand expire(List<ByteBuffer> fields, Expiration expiration) {
+ return new ExpireCommand(null, fields, expiration, FieldExpirationOptions.none());
+ }
+
+ /**
+ * Creates a new {@link ExpireCommand}.
+ *
+ * @param fields the {@code field} names to apply expiration to
+ * @param ttl the unix point in time when to expire the given {@literal fields}.
+ * @param precision can be {@link TimeUnit#SECONDS} or {@link TimeUnit#MILLISECONDS}.
+ * @return new instance of {@link ExpireCommand}.
+ */
+ public static ExpireCommand expireAt(List<ByteBuffer> fields, Instant ttl, TimeUnit precision) {
+
+ if (precision.compareTo(TimeUnit.MILLISECONDS) > 0) {
+ return expire(fields, Expiration.unixTimestamp(ttl.getEpochSecond(), TimeUnit.SECONDS));
+ }
+
+ return expire(fields, Expiration.unixTimestamp(ttl.toEpochMilli(), TimeUnit.MILLISECONDS));
+ }
+
+ /**
+ * @param key the {@literal key} from which to expire the {@literal fields} from.
+ * @return new instance of {@link ExpireCommand}.
+ */
+ public ExpireCommand from(ByteBuffer key) {
+ return new ExpireCommand(key, getFields(), expiration, options);
+ }
+
+ /**
+ * @param options additional options to be sent along with the command.
+ * @return new instance of {@link ExpireCommand}.
+ */
+ public ExpireCommand withOptions(FieldExpirationOptions options) {
+ return new ExpireCommand(getKey(), getFields(), getExpiration(), options);
+ }
+
+ public Expiration getExpiration() {
+ return expiration;
+ }
+
+ public FieldExpirationOptions getOptions() {
+ return options;
+ }
+ }
+
+ /**
+ * Expire a given {@literal field} after a given {@link Duration} of time, measured in milliseconds, has passed.
+ *
+ * @param key must not be {@literal null}.
+ * @param field must not be {@literal null}.
+ * @param duration must not be {@literal null}.
+ * @return a {@link Mono} emitting the expiration result - {@code 2} indicating the specific field is deleted already
+ * due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated;
+ * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met);
+ * {@code -2} indicating there is no such field;
+ * @see Redis Documentation: HEXPIRE
+ * @since 3.5
+ */
+ default Mono<Long> hExpire(ByteBuffer key, Duration duration, ByteBuffer field) {
+
+ Assert.notNull(duration, "Duration must not be null");
+
+ return hExpire(key, duration, Collections.singletonList(field)).singleOrEmpty();
+ }
+
+ /**
+ * Expire a {@link List} of {@literal field} after a given {@link Duration} of time, measured in milliseconds, has
+ * passed.
+ *
+ * @param key must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @param duration must not be {@literal null}.
+ * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is
+ * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time
+ * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition
+ * is not met); {@code -2} indicating there is no such field;
+ * @see Redis Documentation: HEXPIRE
+ * @since 3.5
+ */
+ default Flux<Long> hExpire(ByteBuffer key, Duration duration, List<ByteBuffer> fields) {
+
+ Assert.notNull(duration, "Duration must not be null");
+
+ return applyExpiration(Flux.just(ExpireCommand.expire(fields, duration).from(key)))
+ .mapNotNull(NumericResponse::getOutput);
+ }
+
+ /**
+ * Expire a {@link List} of {@literal field} after a given {@link Duration} of time, measured in milliseconds, has
+ * passed.
+ *
+ * @param commands must not be {@literal null}.
+ * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is
+ * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time
+ * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition
+ * is not met); {@code -2} indicating there is no such field;
+ * @since 3.5
+ * @see Redis Documentation: HEXPIRE
+ */
+ Flux<NumericResponse<ExpireCommand, Long>> applyExpiration(Publisher<ExpireCommand> commands);
+
+ /**
+ * Expire a given {@literal field} after a given {@link Duration} of time, measured in milliseconds, has passed.
+ *
+ * @param key must not be {@literal null}.
+ * @param field must not be {@literal null}.
+ * @param duration must not be {@literal null}.
+ * @return a {@link Mono} emitting the expiration result - {@code 2} indicating the specific field is deleted already
+ * due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated;
+ * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met);
+ * {@code -2} indicating there is no such field;
+ * @see Redis Documentation: HEXPIRE
+ * @since 3.5
+ */
+ default Mono<Long> hpExpire(ByteBuffer key, Duration duration, ByteBuffer field) {
+
+ Assert.notNull(duration, "Duration must not be null");
+
+ return hpExpire(key, duration, Collections.singletonList(field)).singleOrEmpty();
+ }
+
+ /**
+ * Expire a {@link List} of {@literal field} after a given {@link Duration} of time, measured in milliseconds, has
+ * passed.
+ *
+ * @param key must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @param duration must not be {@literal null}.
+ * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is
+ * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time
+ * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition
+ * is not met); {@code -2} indicating there is no such field;
+ * @see Redis Documentation: HEXPIRE
+ * @since 3.5
+ */
+ default Flux<Long> hpExpire(ByteBuffer key, Duration duration, List<ByteBuffer> fields) {
+
+ Assert.notNull(duration, "Duration must not be null");
+
+ return applyExpiration(Flux.just(new ExpireCommand(key, fields,
+ Expiration.from(duration.toMillis(), TimeUnit.MILLISECONDS), FieldExpirationOptions.none())))
+ .mapNotNull(NumericResponse::getOutput);
+ }
+
+ /**
+ * Expire a given {@literal field} at a given {@link Instant}, interpreted as an absolute Unix timestamp
+ * in seconds since the Unix epoch.
+ *
+ * @param key must not be {@literal null}.
+ * @param field must not be {@literal null}.
+ * @param expireAt must not be {@literal null}.
+ * @return a {@link Mono} emitting the expiration result - {@code 2} indicating the specific field is deleted already
+ * due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is
+ * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is
+ * not met); {@code -2} indicating there is no such field;
+ * @see Redis Documentation: HEXPIREAT
+ * @since 3.5
+ */
+ default Mono<Long> hExpireAt(ByteBuffer key, Instant expireAt, ByteBuffer field) {
+
+ Assert.notNull(expireAt, "ExpireAt must not be null");
+
+ return hExpireAt(key, expireAt, Collections.singletonList(field)).singleOrEmpty();
+ }
+
+ /**
+ * Expire a {@link List} of {@literal fields} at a given {@link Instant}, interpreted as an absolute Unix
+ * timestamp in seconds since the Unix epoch.
+ *
+ * @param key must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @param expireAt must not be {@literal null}.
+ * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is
+ * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating
+ * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX |
+ * GT | LT condition is not met); {@code -2} indicating there is no such field;
+ * @see Redis Documentation: HEXPIREAT
+ * @since 3.5
+ */
+ default Flux<Long> hExpireAt(ByteBuffer key, Instant expireAt, List<ByteBuffer> fields) {
+
+ Assert.notNull(expireAt, "ExpireAt must not be null");
+
+ return applyExpiration(Flux.just(ExpireCommand.expireAt(fields, expireAt, TimeUnit.SECONDS).from(key)))
+ .mapNotNull(NumericResponse::getOutput);
+ }
+
+ /**
+ * Expire a given {@literal field} at a given {@link Instant}, interpreted as an absolute Unix timestamp
+ * in milliseconds since the Unix epoch.
+ *
+ * @param key must not be {@literal null}.
+ * @param field must not be {@literal null}.
+ * @param expireAt must not be {@literal null}.
+ * @return a {@link Mono} emitting the expiration result - {@code 2} indicating the specific field is deleted already
+ * due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is
+ * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is
+ * not met); {@code -2} indicating there is no such field;
+ * @see Redis Documentation: HPEXPIREAT
+ * @since 3.5
+ */
+ default Mono<Long> hpExpireAt(ByteBuffer key, Instant expireAt, ByteBuffer field) {
+
+ Assert.notNull(expireAt, "ExpireAt must not be null");
+
+ return hpExpireAt(key, expireAt, Collections.singletonList(field)).singleOrEmpty();
+ }
+
+ /**
+ * Expire a {@link List} of {@literal fields} at a given {@link Instant}, interpreted as an absolute Unix
+ * timestamp in milliseconds since the Unix epoch.
+ *
+ * @param key must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @param expireAt must not be {@literal null}.
+ * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is
+ * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating
+ * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX |
+ * GT | LT condition is not met); {@code -2} indicating there is no such field;
+ * @see Redis Documentation: HPEXPIREAT
+ * @since 3.5
+ */
+ default Flux<Long> hpExpireAt(ByteBuffer key, Instant expireAt, List<ByteBuffer> fields) {
+
+ Assert.notNull(expireAt, "ExpireAt must not be null");
+
+ return applyExpiration(Flux.just(ExpireCommand.expireAt(fields, expireAt, TimeUnit.MILLISECONDS).from(key)))
+ .mapNotNull(NumericResponse::getOutput);
+ }
+
+ /**
+ * Persist a given {@literal field}, removing any associated expiration.
+ *
+ * @param key must not be {@literal null}.
+ * @param field must not be {@literal null}.
+ * @return a {@link Mono} emitting the persist result - {@code 1} indicating expiration time is removed; {@code -1}
+ * field has no expiration time to be removed; {@code -2} indicating there is no such field;
+ * @see Redis Documentation: HPERSIST
+ * @since 3.5
+ */
+ default Mono<Long> hPersist(ByteBuffer key, ByteBuffer field) {
+ return hPersist(key, Collections.singletonList(field)).singleOrEmpty();
+ }
+
+ /**
+ * Persist a given {@link List} of {@literal field} removing any associated expiration.
+ *
+ * @param key must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a {@link Flux} emitting the persisting results one by one - {@code 1} indicating expiration time is
+ * removed; {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such
+ * field;
+ * @see Redis Documentation: HPERSIST
+ * @since 3.5
+ */
+ default Flux<Long> hPersist(ByteBuffer key, List<ByteBuffer> fields) {
+ return hPersist(Flux.just(new HashFieldsCommand(key, fields))).mapNotNull(NumericResponse::getOutput);
+ }
+
+ /**
+ * Persist a given {@link List} of {@literal field} removing any associated expiration.
+ *
+ * @param commands must not be {@literal null}.
+ * @return a {@link Flux} emitting the persisting results one by one - {@code 1} indicating expiration time is
+ * removed; {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such
+ * field;
+ * @since 3.5
+ * @see Redis Documentation: HPERSIST
+ */
+ Flux<NumericResponse<HashFieldsCommand, Long>> hPersist(Publisher<HashFieldsCommand> commands);
+
+ /**
+ * Returns the time-to-live of a given {@literal field} in seconds.
+ *
+ * @param key must not be {@literal null}.
+ * @param field must not be {@literal null}.
+ * @return a {@link Mono} emitting the TTL result - the time to live in seconds; or a negative value to signal an
+ * error. The command returns {@code -1} if the key exists but has no associated expiration time. The command
+ * returns {@code -2} if the key does not exist;
+ * @see Redis Documentation: HTTL
+ * @since 3.5
+ */
+ default Mono<Long> hTtl(ByteBuffer key, ByteBuffer field) {
+ return hTtl(key, Collections.singletonList(field)).singleOrEmpty();
+ }
+
+ /**
+ * Returns the time-to-live of all the given {@literal field} in the {@link List} in seconds.
+ *
+ * @param key must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a {@link Flux} emitting the TTL results one by one - the time to live in seconds; or a negative value to
+ * signal an error. The command returns {@code -1} if the key exists but has no associated expiration time.
+ * The command returns {@code -2} if the key does not exist;
+ * @see Redis Documentation: HTTL
+ * @since 3.5
+ */
+ default Flux<Long> hTtl(ByteBuffer key, List<ByteBuffer> fields) {
+ return hTtl(Flux.just(new HashFieldsCommand(key, fields))).mapNotNull(NumericResponse::getOutput);
+ }
+
+ /**
+ * Returns the time-to-live of all the given {@literal field} in the {@link List} in seconds.
+ *
+ * @param commands must not be {@literal null}.
+ * @return a {@link Flux} emitting the persisting results one by one - the time to live in seconds; or a negative
+ * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration
+ * time. The command returns {@code -2} if the key does not exist;
+ * @since 3.5
+ * @see Redis Documentation: HTTL
+ */
+ Flux<NumericResponse<HashFieldsCommand, Long>> hTtl(Publisher<HashFieldsCommand> commands);
+
+ /**
+ * Returns the time-to-live of a given {@literal field} in milliseconds.
+ *
+ * @param key must not be {@literal null}.
+ * @param field must not be {@literal null}.
+ * @return a {@link Mono} emitting the TTL result - the time to live in milliseconds; or a negative value to signal an
+ * error. The command returns {@code -1} if the key exists but has no associated expiration time. The command
+ * returns {@code -2} if the key does not exist;
+ * @see Redis Documentation: HPTTL
+ * @since 3.5
+ */
+ default Mono<Long> hpTtl(ByteBuffer key, ByteBuffer field) {
+ return hpTtl(key, Collections.singletonList(field)).singleOrEmpty();
+ }
+
+ /**
+ * Returns the time-to-live of all the given {@literal field} in the {@link List} in milliseconds.
+ *
+ * @param key must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a {@link Flux} emitting the TTL results one by one - the time to live in milliseconds; or a negative value
+ * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time.
+ * The command returns {@code -2} if the key does not exist;
+ * @see Redis Documentation: HPTTL
+ * @since 3.5
+ */
+ default Flux<Long> hpTtl(ByteBuffer key, List<ByteBuffer> fields) {
+ return hpTtl(Flux.just(new HashFieldsCommand(key, fields))).mapNotNull(NumericResponse::getOutput);
+ }
+
+ /**
+ * Returns the time-to-live of all the given {@literal field} in the {@link List} in milliseconds.
+ *
+ * @param commands must not be {@literal null}.
+ * @return a {@link Flux} emitting the persisting results one by one - the time to live in milliseconds; or a negative
+ * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration
+ * time. The command returns {@code -2} if the key does not exist;
+ * @since 3.5
+ * @see Redis Documentation: HPTTL
+ */
+ Flux<NumericResponse<HashFieldsCommand, Long>> hpTtl(Publisher<HashFieldsCommand> commands);
+
}
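In the reactive API the single-field convenience methods return a Mono while the multi-field and Publisher-based variants emit a Flux of per-field status codes. A minimal sketch of both styles, assuming a ReactiveRedisConnection obtained elsewhere (key and field values are illustrative):

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import java.time.Duration;
    import java.util.List;

    import reactor.core.publisher.Flux;
    import reactor.core.publisher.Mono;

    import org.springframework.data.redis.connection.Hash.FieldExpirationOptions;
    import org.springframework.data.redis.connection.ReactiveHashCommands.ExpireCommand;
    import org.springframework.data.redis.connection.ReactiveRedisConnection;
    import org.springframework.data.redis.connection.ReactiveRedisConnection.NumericResponse;

    class ReactiveHashTtlSketch {

        Mono<Long> expireSingleField(ReactiveRedisConnection connection) {

            ByteBuffer key = ByteBuffer.wrap("session:42".getBytes(StandardCharsets.UTF_8));
            ByteBuffer field = ByteBuffer.wrap("token".getBytes(StandardCharsets.UTF_8));

            // HEXPIRE on one field: emits 1 when the TTL was set, 0 when a condition was not met,
            // -2 when the field does not exist.
            return connection.hashCommands().hExpire(key, Duration.ofSeconds(60), field);
        }

        Flux<Long> expireOnlyIfNoTtl(ReactiveRedisConnection connection, ByteBuffer key, List<ByteBuffer> fields) {

            // The command object carries the fields, the expiration and the NX/XX/GT/LT options.
            ExpireCommand command = ExpireCommand.expire(fields, Duration.ofSeconds(60))
                    .from(key)
                    .withOptions(FieldExpirationOptions.builder().nx().build());

            return connection.hashCommands().applyExpiration(Mono.just(command))
                    .mapNotNull(NumericResponse::getOutput);
        }
    }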
diff --git a/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java b/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java
index 6385c56a57..d038708526 100644
--- a/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java
+++ b/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java
@@ -15,13 +15,17 @@
*/
package org.springframework.data.redis.connection;
+import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import org.springframework.data.redis.connection.Hash.FieldExpirationOptions;
import org.springframework.data.redis.core.Cursor;
import org.springframework.data.redis.core.ScanOptions;
import org.springframework.lang.Nullable;
+import org.springframework.util.ObjectUtils;
/**
* Hash-specific commands supported by Redis.
@@ -29,6 +33,7 @@
* @author Costin Leau
* @author Christoph Strobl
* @author Mark Paluch
+ * @author Tihomir Mateev
*/
public interface RedisHashCommands {
@@ -249,4 +254,314 @@ public interface RedisHashCommands {
*/
@Nullable
Long hStrLen(byte[] key, byte[] field);
+
+ /**
+ * Apply a given {@link org.springframework.data.redis.core.types.Expiration} to the given {@literal fields}.
+ *
+ * @param key must not be {@literal null}.
+ * @param expiration the {@link org.springframework.data.redis.core.types.Expiration} to apply.
+ * @param fields the names of the {@literal fields} to apply the {@literal expiration} to.
+ * @return a {@link List} holding the command result for each field in order - {@code 2} indicating the specific field
+ * is deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration
+ * time is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating there is no
+ * such field;
+ * @since 3.5
+ */
+ default @Nullable List<Long> applyExpiration(byte[] key,
+ org.springframework.data.redis.core.types.Expiration expiration, byte[]... fields) {
+ return applyExpiration(key, expiration, FieldExpirationOptions.none(), fields);
+ }
+
+ /**
+ * @param key must not be {@literal null}.
+ * @param expiration the {@link org.springframework.data.redis.core.types.Expiration} to apply.
+ * @param options additional options to be sent along with the command.
+ * @param fields the names of the {@literal fields} to apply the {@literal expiration} to.
+ * @return a {@link List} holding the command result for each field in order - {@code 2} indicating the specific field
+ * is deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration
+ * time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT
+ * condition is not met); {@code -2} indicating there is no such field;
+ * @since 3.5
+ */
+ @Nullable
+ default List<Long> applyExpiration(byte[] key, org.springframework.data.redis.core.types.Expiration expiration,
+ FieldExpirationOptions options, byte[]... fields) {
+
+ if (expiration.isPersistent()) {
+ return hPersist(key, fields);
+ }
+
+ if (ObjectUtils.nullSafeEquals(FieldExpirationOptions.none(), options)) {
+ if (ObjectUtils.nullSafeEquals(TimeUnit.MILLISECONDS, expiration.getTimeUnit())) {
+ if (expiration.isUnixTimestamp()) {
+ return hpExpireAt(key, expiration.getExpirationTimeInMilliseconds(), fields);
+ }
+ return hpExpire(key, expiration.getExpirationTimeInMilliseconds(), fields);
+ }
+ if (expiration.isUnixTimestamp()) {
+ return hExpireAt(key, expiration.getExpirationTimeInSeconds(), fields);
+ }
+ return hExpire(key, expiration.getExpirationTimeInSeconds(), fields);
+ }
+
+ if (ObjectUtils.nullSafeEquals(TimeUnit.MILLISECONDS, expiration.getTimeUnit())) {
+ if (expiration.isUnixTimestamp()) {
+ return hpExpireAt(key, expiration.getExpirationTimeInMilliseconds(), options.getCondition(), fields);
+ }
+
+ return hpExpire(key, expiration.getExpirationTimeInMilliseconds(), options.getCondition(), fields);
+ }
+
+ if (expiration.isUnixTimestamp()) {
+ return hExpireAt(key, expiration.getExpirationTimeInSeconds(), options.getCondition(), fields);
+ }
+
+ return hExpire(key, expiration.getExpirationTimeInSeconds(), options.getCondition(), fields);
+ }
+
+ /**
+ * Set time to live for given {@code fields} in seconds.
+ *
+ * @param key must not be {@literal null}.
+ * @param seconds the amount of time after which the fields will be expired in seconds, must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is
+ * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time
+ * is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating there is no such
+ * field; {@literal null} when used in pipeline / transaction.
+ * @see Redis Documentation: HEXPIRE
+ * @since 3.5
+ */
+ @Nullable
+ default List<Long> hExpire(byte[] key, long seconds, byte[]... fields) {
+ return hExpire(key, seconds, FieldExpirationOptions.Condition.ALWAYS, fields);
+ }
+
+ /**
+ * Set time to live for given {@code fields}.
+ *
+ * @param key must not be {@literal null}.
+ * @param ttl the amount of time after which the fields will be expired in {@link Duration#toSeconds() seconds}
+ * precision, must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is
+ * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time
+ * is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating there is no such
+ * field; {@literal null} when used in pipeline / transaction.
+ * @see Redis Documentation: HEXPIRE
+ * @since 3.5
+ */
+ @Nullable
+ default List<Long> hExpire(byte[] key, Duration ttl, byte[]... fields) {
+ return hExpire(key, ttl.toSeconds(), fields);
+ }
+
+ /**
+ * Set time to live for given {@code fields} in seconds.
+ *
+ * @param key must not be {@literal null}.
+ * @param seconds the amount of time after which the fields will be expired in seconds, must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @param condition the condition for expiration, must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is
+ * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time
+ * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition
+ * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline /
+ * transaction.
+ * @see Redis Documentation: HEXPIRE
+ * @since 3.5
+ */
+ @Nullable
+ List<Long> hExpire(byte[] key, long seconds, FieldExpirationOptions.Condition condition, byte[]... fields);
+
+ /**
+ * Set time to live for given {@code fields} in milliseconds.
+ *
+ * @param key must not be {@literal null}.
+ * @param millis the amount of time after which the fields will be expired in milliseconds, must not be
+ * {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is
+ * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time
+ * is set/updated; {@code 0} indicating the expiration time is not set ; {@code -2} indicating there is no
+ * such field; {@literal null} when used in pipeline / transaction.
+ * @see Redis Documentation: HPEXPIRE
+ * @since 3.5
+ */
+ @Nullable
+ default List<Long> hpExpire(byte[] key, long millis, byte[]... fields) {
+ return hpExpire(key, millis, FieldExpirationOptions.Condition.ALWAYS, fields);
+ }
+
+ /**
+ * Set time to live for given {@code fields} in milliseconds.
+ *
+ * @param key must not be {@literal null}.
+ * @param ttl the amount of time after which the fields will be expired in {@link Duration#toMillis() milliseconds}
+ * precision, must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is
+ * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time
+ * is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating there is no such
+ * field; {@literal null} when used in pipeline / transaction.
+ * @see Redis Documentation: HPEXPIRE
+ * @since 3.5
+ */
+ @Nullable
+ default List<Long> hpExpire(byte[] key, Duration ttl, byte[]... fields) {
+ return hpExpire(key, ttl.toMillis(), fields);
+ }
+
+ /**
+ * Set time to live for given {@code fields} in milliseconds.
+ *
+ * @param key must not be {@literal null}.
+ * @param millis the amount of time after which the fields will be expired in milliseconds, must not be
+ * {@literal null}.
+ * @param condition the condition for expiration, must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is
+ * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time
+ * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition
+ * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline /
+ * transaction.
+ * @see Redis Documentation: HPEXPIRE
+ * @since 3.5
+ */
+ @Nullable
+ List<Long> hpExpire(byte[] key, long millis, FieldExpirationOptions.Condition condition, byte[]... fields);
+
+ /**
+ * Set the expiration for given {@code field} as a {@literal UNIX} timestamp.
+ *
+ * @param key must not be {@literal null}.
+ * @param unixTime the moment in time in which the field expires, must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is
+ * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating
+ * expiration time is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating
+ * there is no such field; {@literal null} when used in pipeline / transaction.
+ * @see Redis Documentation: HEXPIREAT
+ * @since 3.5
+ */
+ @Nullable
+ default List<Long> hExpireAt(byte[] key, long unixTime, byte[]... fields) {
+ return hExpireAt(key, unixTime, FieldExpirationOptions.Condition.ALWAYS, fields);
+ }
+
+ /**
+ * Set the expiration for given {@code field} as a {@literal UNIX} timestamp.
+ *
+ * @param key must not be {@literal null}.
+ * @param unixTime the moment in time in which the field expires, must not be {@literal null}.
+ * @param condition the condition for expiration, must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is
+ * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating
+ * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX |
+ * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in
+ * pipeline / transaction.
+ * @see Redis Documentation: HEXPIREAT
+ * @since 3.5
+ */
+ @Nullable
+ List<Long> hExpireAt(byte[] key, long unixTime, FieldExpirationOptions.Condition condition, byte[]... fields);
+
+ /**
+ * Set the expiration for given {@code field} as a {@literal UNIX} timestamp in milliseconds.
+ *
+ * @param key must not be {@literal null}.
+ * @param unixTimeInMillis the moment in time in which the field expires in milliseconds, must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is
+ * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating
+ * expiration time is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating
+ * there is no such field; {@literal null} when used in pipeline / transaction.
+ * @see Redis Documentation: HPEXPIREAT
+ * @since 3.5
+ */
+ @Nullable
+ default List<Long> hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... fields) {
+ return hpExpireAt(key, unixTimeInMillis, FieldExpirationOptions.Condition.ALWAYS, fields);
+ }
+
+ /**
+ * Set the expiration for given {@code field} as a {@literal UNIX} timestamp in milliseconds.
+ *
+ * @param key must not be {@literal null}.
+ * @param unixTimeInMillis the moment in time in which the field expires in milliseconds, must not be {@literal null}.
+ * @param condition the condition for expiration, must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is
+ * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating
+ * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX |
+ * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in
+ * pipeline / transaction.
+ * @see Redis Documentation: HPEXPIREAT
+ * @since 3.5
+ */
+ @Nullable
+ List<Long> hpExpireAt(byte[] key, long unixTimeInMillis, FieldExpirationOptions.Condition condition,
+ byte[]... fields);
+
+ /**
+ * Remove the expiration from given {@code field}.
+ *
+ * @param key must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is
+ * removed; {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such
+ * field; {@literal null} when used in pipeline / transaction.
+ * @see Redis Documentation: HPERSIST
+ * @since 3.5
+ */
+ @Nullable
+ List<Long> hPersist(byte[] key, byte[]... fields);
+
+ /**
+ * Get the time to live for {@code fields} in seconds.
+ *
+ * @param key must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative
+ * value to signal an error. The command returns {@code -1} if the field exists but has no associated
+ * expiration time. The command returns {@code -2} if the field does not exist; {@literal null} when used in
+ * pipeline / transaction.
+ * @see Redis Documentation: HTTL
+ * @since 3.5
+ */
+ @Nullable
+ List hTtl(byte[] key, byte[]... fields);
+
+ /**
+ * Get the time to live for {@code fields} and convert it to the given {@link TimeUnit}.
+ *
+ * @param key must not be {@literal null}.
+ * @param timeUnit must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: the time to live in the {@link TimeUnit}
+ * provided; or a negative value to signal an error. The command returns {@code -1} if the field exists but
+ * has no associated expiration time. The command returns {@code -2} if the field does not exist;
+ * {@literal null} when used in pipeline / transaction.
+ * @see Redis Documentation: HTTL
+ * @since 3.5
+ */
+ @Nullable
+ List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields);
+
+ /**
+ * Get the time to live for {@code fields} in milliseconds.
+ *
+ * @param key must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: the time to live in milliseconds; or a
+ * negative value to signal an error. The command returns {@code -1} if the field exists but has no associated
+ * expiration time. The command returns {@code -2} if the field does not exist; {@literal null} when used in
+ * pipeline / transaction.
+ * @see Redis Documentation: HTTL
+ * @since 3.5
+ */
+ @Nullable
+ List hpTtl(byte[] key, byte[]... fields);
}
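
Illustrative usage (not part of this change set): a minimal sketch of driving the new binary hash-field expiration commands through RedisConnection.hashCommands(). The connection factory wiring, the key and the field name are assumptions made for the example.

import java.nio.charset.StandardCharsets;
import java.time.Instant;
import java.util.List;

import org.springframework.data.redis.connection.Hash.FieldExpirationOptions;
import org.springframework.data.redis.connection.RedisConnection;
import org.springframework.data.redis.connection.RedisConnectionFactory;

class HashFieldExpirationSketch {

	List<Long> expireTokenTomorrow(RedisConnectionFactory factory) {

		byte[] key = "session:42".getBytes(StandardCharsets.UTF_8);
		byte[] field = "token".getBytes(StandardCharsets.UTF_8);
		long unixTime = Instant.now().plusSeconds(86_400).getEpochSecond();

		RedisConnection connection = factory.getConnection();
		try {
			// HEXPIREAT: per field, 1 = expiration set, 0 = condition not met, 2 = field removed/expired, -2 = no such field
			return connection.hashCommands().hExpireAt(key, unixTime, FieldExpirationOptions.Condition.ALWAYS, field);
		} finally {
			connection.close();
		}
	}
}
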
diff --git a/src/main/java/org/springframework/data/redis/connection/RedisKeyCommands.java b/src/main/java/org/springframework/data/redis/connection/RedisKeyCommands.java
index 414f178d92..49326637d3 100644
--- a/src/main/java/org/springframework/data/redis/connection/RedisKeyCommands.java
+++ b/src/main/java/org/springframework/data/redis/connection/RedisKeyCommands.java
@@ -16,6 +16,7 @@
package org.springframework.data.redis.connection;
import java.time.Duration;
+import java.time.Instant;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
@@ -191,6 +192,20 @@ default Cursor scan(KeyScanOptions options) {
@Nullable
Boolean expire(byte[] key, long seconds);
+ /**
+ * Set time to live for given {@code key} using {@link Duration#toSeconds() seconds} precision.
+ *
+ * @param key must not be {@literal null}.
+ * @param duration the amount of time after which the key will be expired, must not be {@literal null}.
+ * @return {@literal null} when used in pipeline / transaction.
+ * @see Redis Documentation: EXPIRE
+ * @since 3.5
+ */
+ @Nullable
+ default Boolean expire(byte[] key, Duration duration) {
+ return expire(key, duration.toSeconds());
+ }
+
/**
* Set time to live for given {@code key} in milliseconds.
*
@@ -202,6 +217,20 @@ default Cursor scan(KeyScanOptions options) {
@Nullable
Boolean pExpire(byte[] key, long millis);
+ /**
+ * Set time to live for given {@code key} using {@link Duration#toMillis() milliseconds} precision.
+ *
+ * @param key must not be {@literal null}.
+ * @param duration the amount of time after which the key will be expired, must not be {@literal null}.
+ * @return {@literal null} when used in pipeline / transaction.
+ * @see Redis Documentation: PEXPIRE
+ * @since 3.5
+ */
+ @Nullable
+ default Boolean pExpire(byte[] key, Duration duration) {
+ return pExpire(key, duration.toMillis());
+ }
+
/**
* Set the expiration for given {@code key} as a {@literal UNIX} timestamp.
*
@@ -213,6 +242,21 @@ default Cursor scan(KeyScanOptions options) {
@Nullable
Boolean expireAt(byte[] key, long unixTime);
+ /**
+ * Set the expiration for given {@code key} as a {@literal UNIX} timestamp in {@link Instant#getEpochSecond() seconds}
+ * precision.
+ *
+ * @param key must not be {@literal null}.
+ * @param unixTime the moment in time in which the key expires, must not be {@literal null}.
+ * @return {@literal null} when used in pipeline / transaction.
+ * @see Redis Documentation: EXPIREAT
+ * @since 3.5
+ */
+ @Nullable
+ default Boolean expireAt(byte[] key, Instant unixTime) {
+ return expireAt(key, unixTime.getEpochSecond());
+ }
+
/**
* Set the expiration for given {@code key} as a {@literal UNIX} timestamp in milliseconds.
*
@@ -224,6 +268,21 @@ default Cursor scan(KeyScanOptions options) {
@Nullable
Boolean pExpireAt(byte[] key, long unixTimeInMillis);
+ /**
+ * Set the expiration for given {@code key} as a {@literal UNIX} timestamp in {@link Instant#toEpochMilli()
+ * milliseconds} precision.
+ *
+ * @param key must not be {@literal null}.
+ * @param unixTime the moment in time in which the key expires, must not be {@literal null}.
+ * @return {@literal null} when used in pipeline / transaction.
+ * @see Redis Documentation: PEXPIREAT
+ * @since 3.5
+ */
+ @Nullable
+ default Boolean pExpireAt(byte[] key, Instant unixTime) {
+ return pExpireAt(key, unixTime.toEpochMilli());
+ }
+
/**
* Remove the expiration from given {@code key}.
*
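
A short sketch of the new Duration- and Instant-based convenience overloads added to RedisKeyCommands; they delegate to the existing long-based variants. The connection argument and the key name are illustrative.

import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.time.Instant;

import org.springframework.data.redis.connection.RedisConnection;

class KeyExpireOverloadSketch {

	void expire(RedisConnection connection) {

		byte[] key = "cache:entry".getBytes(StandardCharsets.UTF_8);

		// delegates to expire(key, 300)
		connection.keyCommands().expire(key, Duration.ofMinutes(5));

		// delegates to pExpireAt(key, instant.toEpochMilli())
		connection.keyCommands().pExpireAt(key, Instant.now().plus(Duration.ofHours(1)));
	}
}
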
diff --git a/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java b/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java
index 2c286ce97e..1069e430c8 100644
--- a/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java
+++ b/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java
@@ -71,7 +71,6 @@
* @author Andrey Shlykov
* @author ihaohong
* @author Shyngys Sapraliyev
- *
* @see RedisCallback
* @see RedisSerializer
* @see StringRedisTemplate
@@ -1661,7 +1660,6 @@ default Long lPos(String key, String element) {
*/
Long zRemRange(String key, long start, long end);
-
/**
* Remove all elements between the lexicographical {@link Range}.
*
@@ -1941,7 +1939,8 @@ default Set zUnionWithScores(Aggregate aggregate, int[] weights, St
* @return
* @since 1.6
* @see Redis Documentation: ZRANGEBYLEX
- * @see RedisZSetCommands#zRangeByLex(byte[], org.springframework.data.domain.Range, org.springframework.data.redis.connection.Limit)
+ * @see RedisZSetCommands#zRangeByLex(byte[], org.springframework.data.domain.Range,
+ * org.springframework.data.redis.connection.Limit)
*/
Set zRangeByLex(String key, org.springframework.data.domain.Range range,
org.springframework.data.redis.connection.Limit limit);
@@ -1983,7 +1982,8 @@ default Set zRevRangeByLex(String key, org.springframework.data.domain.R
* @return
* @since 2.4
* @see Redis Documentation: ZREVRANGEBYLEX
- * @see RedisZSetCommands#zRevRangeByLex(byte[], org.springframework.data.domain.Range, org.springframework.data.redis.connection.Limit)
+ * @see RedisZSetCommands#zRevRangeByLex(byte[], org.springframework.data.domain.Range,
+ * org.springframework.data.redis.connection.Limit)
*/
Set zRevRangeByLex(String key, org.springframework.data.domain.Range range,
org.springframework.data.redis.connection.Limit limit);
@@ -2333,6 +2333,208 @@ Long zRangeStoreRevByScore(String dstKey, String srcKey,
@Nullable
Long hStrLen(String key, String field);
+ /**
+ * Set time to live for given {@code field} in seconds.
+ *
+ * @param key must not be {@literal null}.
+ * @param seconds the amount of time after which the key will be expired in seconds, must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is
+ * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time
+ * is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating there is no such
+ * field; {@literal null} when used in pipeline / transaction.
+ * @see Redis Documentation: HEXPIRE
+ * @since 3.5
+ */
+ @Nullable
+ default List hExpire(String key, long seconds, String... fields) {
+ return hExpire(key, seconds, Hash.FieldExpirationOptions.Condition.ALWAYS, fields);
+ }
+
+ /**
+ * Set time to live for given {@code field} in seconds.
+ *
+ * @param key must not be {@literal null}.
+ * @param seconds the amount of time after which the key will be expired in seconds, must not be {@literal null}.
+ * @param condition the condition for expiration, must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is
+ * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time
+ * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition
+ * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline /
+ * transaction.
+ * @see Redis Documentation: HEXPIRE
+ * @since 3.5
+ */
+ @Nullable
+ List hExpire(String key, long seconds, Hash.FieldExpirationOptions.Condition condition, String... fields);
+
+ /**
+ * Set time to live for given {@code field} in milliseconds.
+ *
+ * @param key must not be {@literal null}.
+ * @param millis the amount of time after which the key will be expired in milliseconds, must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is
+ * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time
+ * is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating there is no such
+ * field; {@literal null} when used in pipeline / transaction.
+ * @see Redis Documentation: HPEXPIRE
+ * @since 3.5
+ */
+ @Nullable
+ default List hpExpire(String key, long millis, String... fields) {
+ return hpExpire(key, millis, Hash.FieldExpirationOptions.Condition.ALWAYS, fields);
+ }
+
+ /**
+ * Set time to live for given {@code field} in milliseconds.
+ *
+ * @param key must not be {@literal null}.
+ * @param millis the amount of time after which the key will be expired in milliseconds, must not be {@literal null}.
+ * @param condition the condition for expiration, must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is
+ * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time
+ * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition
+ * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline /
+ * transaction.
+ * @see Redis Documentation: HPEXPIRE
+ * @since 3.5
+ */
+ @Nullable
+ List hpExpire(String key, long millis, Hash.FieldExpirationOptions.Condition condition, String... fields);
+
+ /**
+ * Set the expiration for given {@code field} as a {@literal UNIX} timestamp.
+ *
+ * @param key must not be {@literal null}.
+ * @param unixTime the moment in time in which the field expires, must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is
+ * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating
+ * expiration time is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating
+ * there is no such field; {@literal null} when used in pipeline / transaction.
+ * @see Redis Documentation: HEXPIREAT
+ * @since 3.5
+ */
+ @Nullable
+ default List hExpireAt(String key, long unixTime, String... fields) {
+ return hExpireAt(key, unixTime, Hash.FieldExpirationOptions.Condition.ALWAYS, fields);
+ }
+
+ /**
+ * Set the expiration for given {@code field} as a {@literal UNIX} timestamp.
+ *
+ * @param key must not be {@literal null}.
+ * @param unixTime the moment in time in which the field expires, must not be {@literal null}.
+ * @param condition the condition for expiration, must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is
+ * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating
+ * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX |
+ * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in
+ * pipeline / transaction.
+ * @see Redis Documentation: HEXPIREAT
+ * @since 3.5
+ */
+ @Nullable
+ List hExpireAt(String key, long unixTime, Hash.FieldExpirationOptions.Condition condition, String... fields);
+
+ /**
+ * Set the expiration for given {@code field} as a {@literal UNIX} timestamp in milliseconds.
+ *
+ * @param key must not be {@literal null}.
+ * @param unixTimeInMillis the moment in time in which the field expires in milliseconds, must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is
+ * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating
+ * expiration time is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating
+ * there is no such field; {@literal null} when used in pipeline / transaction.
+ * @see Redis Documentation: HPEXPIREAT
+ * @since 3.5
+ */
+ @Nullable
+ default List hpExpireAt(String key, long unixTimeInMillis, String... fields) {
+ return hpExpireAt(key, unixTimeInMillis, Hash.FieldExpirationOptions.Condition.ALWAYS, fields);
+ }
+
+ /**
+ * Set the expiration for given {@code field} as a {@literal UNIX} timestamp in milliseconds.
+ *
+ * @param key must not be {@literal null}.
+ * @param unixTimeInMillis the moment in time in which the field expires in milliseconds, must not be {@literal null}.
+ * @param condition the condition for expiration, must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is
+ * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating
+ * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX |
+ * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in
+ * pipeline / transaction.
+ * @see Redis Documentation: HPEXPIREAT
+ * @since 3.5
+ */
+ @Nullable
+ List hpExpireAt(String key, long unixTimeInMillis, Hash.FieldExpirationOptions.Condition condition,
+ String... fields);
+
+ /**
+ * Remove the expiration from given {@code field}.
+ *
+ * @param key must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating the expiration time
+ * was removed; {@code -1} indicating the field has no expiration time to remove; {@code -2} indicating there
+ * is no such field; {@literal null} when used in pipeline / transaction.
+ * @see Redis Documentation: HPERSIST
+ * @since 3.5
+ */
+ @Nullable
+ List hPersist(String key, String... fields);
+
+ /**
+ * Get the time to live for {@code fields} in seconds.
+ *
+ * @param key must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative
+ * value to signal an error. The command returns {@code -1} if the field exists but has no associated
+ * expiration time. The command returns {@code -2} if the field does not exist; {@literal null} when used in
+ * pipeline / transaction.
+ * @see Redis Documentation: HTTL
+ * @since 3.5
+ */
+ @Nullable
+ List hTtl(String key, String... fields);
+
+ /**
+ * Get the time to live for {@code fields} and convert it to the given {@link TimeUnit}.
+ *
+ * @param key must not be {@literal null}.
+ * @param timeUnit must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: the time to live in the {@link TimeUnit}
+ * provided; or a negative value to signal an error. The command returns {@code -1} if the field exists but
+ * has no associated expiration time. The command returns {@code -2} if the field does not exist;
+ * {@literal null} when used in pipeline / transaction.
+ * @see Redis Documentation: HTTL
+ * @since 3.5
+ */
+ @Nullable
+ List hTtl(String key, TimeUnit timeUnit, String... fields);
+
+ /**
+ * Get the time to live for {@code fields} in milliseconds.
+ *
+ * @param key must not be {@literal null}.
+ * @param fields must not be {@literal null}.
+ * @return a list of {@link Long} values for each of the fields provided: the time to live in milliseconds; or a
+ * negative value to signal an error. The command returns {@code -1} if the field exists but has no associated
+ * expiration time. The command returns {@code -2} if the field does not exist; {@literal null} when used in
+ * pipeline / transaction.
+ * @see Redis Documentation: HTTL
+ * @since 3.5
+ */
+ @Nullable
+ List hpTtl(String key, String... fields);
+
// -------------------------------------------------------------------------
// Methods dealing with HyperLogLog
// -------------------------------------------------------------------------
@@ -2556,8 +2758,7 @@ GeoResults> geoRadiusByMember(String key, String member, Dis
/**
* Return the members of a geo set which are within the borders of the area specified by a given {@link GeoShape
- * shape}. The query's center point is provided by
- * {@link GeoReference}.
+ * shape}. The query's center point is provided by {@link GeoReference}.
*
* @param key must not be {@literal null}.
* @param reference must not be {@literal null}.
@@ -2573,8 +2774,7 @@ GeoResults> geoSearch(String key, GeoReference refer
/**
* Query the members of a geo set which are within the borders of the area specified by a given {@link GeoShape shape}
- * and store the result at {@code destKey}. The query's center point is provided by
- * {@link GeoReference}.
+ * and store the result at {@code destKey}. The query's center point is provided by {@link GeoReference}.
*
* @param key must not be {@literal null}.
* @param reference must not be {@literal null}.
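
A sketch of the String-typed variants added to StringRedisConnection, combining the implicit ALWAYS condition with a TimeUnit-converting TTL read; the connection wiring, key and field names are assumptions of the example.

import java.util.List;
import java.util.concurrent.TimeUnit;

import org.springframework.data.redis.connection.StringRedisConnection;

class StringHashFieldTtlSketch {

	void run(StringRedisConnection connection) {

		connection.hSet("session:42", "token", "abc");

		// HEXPIRE with the implicit ALWAYS condition via the two-argument default method
		List<Long> applied = connection.hExpire("session:42", 60, "token");

		// HTTL converted into the requested unit
		List<Long> remaining = connection.hTtl("session:42", TimeUnit.MILLISECONDS, "token");
	}
}
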
diff --git a/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java b/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java
index 47ad6c6eec..1223ab4c06 100644
--- a/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java
+++ b/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java
@@ -15,6 +15,7 @@
*/
package org.springframework.data.redis.connection.jedis;
+import redis.clients.jedis.args.ExpiryOption;
import redis.clients.jedis.params.ScanParams;
import redis.clients.jedis.resps.ScanResult;
@@ -23,8 +24,10 @@
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
+import java.util.concurrent.TimeUnit;
import org.springframework.dao.DataAccessException;
+import org.springframework.data.redis.connection.Hash.FieldExpirationOptions;
import org.springframework.data.redis.connection.RedisHashCommands;
import org.springframework.data.redis.core.Cursor;
import org.springframework.data.redis.core.ScanCursor;
@@ -39,6 +42,7 @@
* @author Christoph Strobl
* @author Mark Paluch
* @author John Blum
+ * @author Tihomir Mateev
* @since 2.0
*/
class JedisClusterHashCommands implements RedisHashCommands {
@@ -279,14 +283,136 @@ protected ScanIteration> doScan(CursorId cursorId, ScanOpt
ScanParams params = JedisConverters.toScanParams(options);
- ScanResult> result = connection.getCluster().hscan(key,
- JedisConverters.toBytes(cursorId),
+ ScanResult> result = connection.getCluster().hscan(key, JedisConverters.toBytes(cursorId),
params);
return new ScanIteration<>(CursorId.of(result.getCursor()), result.getResult());
}
}.open();
}
+ @Override
+ public List hExpire(byte[] key, long seconds, FieldExpirationOptions.Condition condition, byte[]... fields) {
+
+ Assert.notNull(key, "Key must not be null");
+ Assert.notNull(fields, "Fields must not be null");
+
+ try {
+ if (condition == FieldExpirationOptions.Condition.ALWAYS) {
+ return connection.getCluster().hexpire(key, seconds, fields);
+ }
+
+ return connection.getCluster().hexpire(key, seconds, ExpiryOption.valueOf(condition.name()), fields);
+ } catch (Exception ex) {
+ throw convertJedisAccessException(ex);
+ }
+ }
+
+ @Override
+ public List hpExpire(byte[] key, long millis, FieldExpirationOptions.Condition condition, byte[]... fields) {
+
+ Assert.notNull(key, "Key must not be null");
+ Assert.notNull(fields, "Fields must not be null");
+
+ try {
+ if (condition == FieldExpirationOptions.Condition.ALWAYS) {
+ return connection.getCluster().hpexpire(key, millis, fields);
+ }
+
+ return connection.getCluster().hpexpire(key, millis, ExpiryOption.valueOf(condition.name()), fields);
+ } catch (Exception ex) {
+ throw convertJedisAccessException(ex);
+ }
+ }
+
+ @Override
+ public List hExpireAt(byte[] key, long unixTime, FieldExpirationOptions.Condition condition, byte[]... fields) {
+
+ Assert.notNull(key, "Key must not be null");
+ Assert.notNull(fields, "Fields must not be null");
+
+ try {
+
+ if (condition == FieldExpirationOptions.Condition.ALWAYS) {
+ return connection.getCluster().hexpireAt(key, unixTime, fields);
+ }
+
+ return connection.getCluster().hexpireAt(key, unixTime, ExpiryOption.valueOf(condition.name()), fields);
+ } catch (Exception ex) {
+ throw convertJedisAccessException(ex);
+ }
+ }
+
+ @Override
+ public List hpExpireAt(byte[] key, long unixTimeInMillis, FieldExpirationOptions.Condition condition,
+ byte[]... fields) {
+
+ Assert.notNull(key, "Key must not be null");
+ Assert.notNull(fields, "Fields must not be null");
+
+ try {
+
+ if (condition == FieldExpirationOptions.Condition.ALWAYS) {
+ return connection.getCluster().hpexpireAt(key, unixTimeInMillis, fields);
+ }
+
+ return connection.getCluster().hpexpireAt(key, unixTimeInMillis, ExpiryOption.valueOf(condition.name()), fields);
+ } catch (Exception ex) {
+ throw convertJedisAccessException(ex);
+ }
+ }
+
+ @Override
+ public List hPersist(byte[] key, byte[]... fields) {
+
+ Assert.notNull(key, "Key must not be null");
+ Assert.notNull(fields, "Fields must not be null");
+
+ try {
+ return connection.getCluster().hpersist(key, fields);
+ } catch (Exception ex) {
+ throw convertJedisAccessException(ex);
+ }
+ }
+
+ @Override
+ public List hTtl(byte[] key, byte[]... fields) {
+
+ Assert.notNull(key, "Key must not be null");
+ Assert.notNull(fields, "Fields must not be null");
+
+ try {
+ return connection.getCluster().httl(key, fields);
+ } catch (Exception ex) {
+ throw convertJedisAccessException(ex);
+ }
+ }
+
+ @Override
+ public List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) {
+
+ Assert.notNull(key, "Key must not be null");
+ Assert.notNull(fields, "Fields must not be null");
+
+ try {
+ return connection.getCluster().httl(key, fields).stream()
+ .map(it -> it != null ? timeUnit.convert(it, TimeUnit.SECONDS) : null).toList();
+ } catch (Exception ex) {
+ throw convertJedisAccessException(ex);
+ }
+ }
+
+ @Override
+ public List hpTtl(byte[] key, byte[]... fields) {
+
+ Assert.notNull(key, "Key must not be null");
+ Assert.notNull(fields, "Fields must not be null");
+
+ try {
+ return connection.getCluster().hpttl(key, fields);
+ } catch (Exception ex) {
+ throw convertJedisAccessException(ex);
+ }
+ }
+
@Nullable
@Override
public Long hStrLen(byte[] key, byte[] field) {
@@ -298,7 +424,7 @@ public Long hStrLen(byte[] key, byte[] field) {
}
private DataAccessException convertJedisAccessException(Exception ex) {
-
return connection.convertJedisAccessException(ex);
}
+
}
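
The Jedis bridges translate the Spring-side condition into Jedis' ExpiryOption by enum name; below is a standalone sketch of that mapping, assuming the NX/XX/GT/LT constants stay aligned between the two enums (ALWAYS is handled by calling the overload without an option).

import redis.clients.jedis.args.ExpiryOption;

import org.springframework.data.redis.connection.Hash.FieldExpirationOptions;

class ConditionMappingSketch {

	// NX, XX, GT and LT map one-to-one onto Jedis' ExpiryOption constants.
	static ExpiryOption toExpiryOption(FieldExpirationOptions.Condition condition) {
		return ExpiryOption.valueOf(condition.name());
	}
}
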
diff --git a/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java b/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java
index a0ac8debf2..2e83d8aba0 100644
--- a/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java
+++ b/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java
@@ -16,6 +16,7 @@
package org.springframework.data.redis.connection.jedis;
import redis.clients.jedis.Jedis;
+import redis.clients.jedis.args.ExpiryOption;
import redis.clients.jedis.commands.PipelineBinaryCommands;
import redis.clients.jedis.params.ScanParams;
import redis.clients.jedis.resps.ScanResult;
@@ -25,8 +26,10 @@
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
+import java.util.concurrent.TimeUnit;
import org.springframework.dao.InvalidDataAccessApiUsageException;
+import org.springframework.data.redis.connection.Hash.FieldExpirationOptions;
import org.springframework.data.redis.connection.RedisHashCommands;
import org.springframework.data.redis.connection.convert.Converters;
import org.springframework.data.redis.core.Cursor;
@@ -43,6 +46,7 @@
* @author Christoph Strobl
* @author Mark Paluch
* @author John Blum
+ * @author Tihomir Mateev
* @since 2.0
*/
class JedisHashCommands implements RedisHashCommands {
@@ -150,7 +154,8 @@ public List> hRandFieldWithValues(byte[] key, long count)
List> convertedMapEntryList = new ArrayList<>(mapEntryList.size());
- mapEntryList.forEach(entry -> convertedMapEntryList.add(Converters.entryOf(entry.getKey(), entry.getValue())));
+ mapEntryList
+ .forEach(entry -> convertedMapEntryList.add(Converters.entryOf(entry.getKey(), entry.getValue())));
return convertedMapEntryList;
@@ -237,8 +242,8 @@ protected ScanIteration> doScan(byte[] key, CursorId curso
ScanParams params = JedisConverters.toScanParams(options);
- ScanResult> result = connection.getJedis().hscan(key,
- JedisConverters.toBytes(cursorId), params);
+ ScanResult> result = connection.getJedis().hscan(key, JedisConverters.toBytes(cursorId),
+ params);
return new ScanIteration<>(CursorId.of(result.getCursor()), result.getResult());
}
@@ -250,6 +255,74 @@ protected void doClose() {
}.open();
}
+ @Override
+ public List hExpire(byte[] key, long seconds, FieldExpirationOptions.Condition condition, byte[]... fields) {
+
+ if (condition == FieldExpirationOptions.Condition.ALWAYS) {
+ return connection.invoke().just(Jedis::hexpire, PipelineBinaryCommands::hexpire, key, seconds, fields);
+ }
+
+ ExpiryOption option = ExpiryOption.valueOf(condition.name());
+ return connection.invoke().just(Jedis::hexpire, PipelineBinaryCommands::hexpire, key, seconds, option, fields);
+ }
+
+ @Override
+ public List hpExpire(byte[] key, long millis, FieldExpirationOptions.Condition condition, byte[]... fields) {
+
+ if (condition == FieldExpirationOptions.Condition.ALWAYS) {
+ return connection.invoke().just(Jedis::hpexpire, PipelineBinaryCommands::hpexpire, key, millis, fields);
+ }
+
+ ExpiryOption option = ExpiryOption.valueOf(condition.name());
+ return connection.invoke().just(Jedis::hpexpire, PipelineBinaryCommands::hpexpire, key, millis, option, fields);
+ }
+
+ @Override
+ public List hExpireAt(byte[] key, long unixTime, FieldExpirationOptions.Condition condition, byte[]... fields) {
+
+ if (condition == FieldExpirationOptions.Condition.ALWAYS) {
+ return connection.invoke().just(Jedis::hexpireAt, PipelineBinaryCommands::hexpireAt, key, unixTime, fields);
+ }
+
+ ExpiryOption option = ExpiryOption.valueOf(condition.name());
+ return connection.invoke().just(Jedis::hexpireAt, PipelineBinaryCommands::hexpireAt, key, unixTime, option, fields);
+ }
+
+ @Override
+ public List hpExpireAt(byte[] key, long unixTimeInMillis, FieldExpirationOptions.Condition condition,
+ byte[]... fields) {
+
+ if (condition == FieldExpirationOptions.Condition.ALWAYS) {
+ return connection.invoke().just(Jedis::hpexpireAt, PipelineBinaryCommands::hpexpireAt, key, unixTimeInMillis,
+ fields);
+ }
+
+ ExpiryOption option = ExpiryOption.valueOf(condition.name());
+ return connection.invoke().just(Jedis::hpexpireAt, PipelineBinaryCommands::hpexpireAt, key, unixTimeInMillis,
+ option, fields);
+ }
+
+ @Override
+ public List hPersist(byte[] key, byte[]... fields) {
+ return connection.invoke().just(Jedis::hpersist, PipelineBinaryCommands::hpersist, key, fields);
+ }
+
+ @Override
+ public List hTtl(byte[] key, byte[]... fields) {
+ return connection.invoke().just(Jedis::httl, PipelineBinaryCommands::httl, key, fields);
+ }
+
+ @Override
+ public List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) {
+ return connection.invoke().fromMany(Jedis::httl, PipelineBinaryCommands::httl, key, fields)
+ .toList(Converters.secondsToTimeUnit(timeUnit));
+ }
+
+ @Override
+ public List hpTtl(byte[] key, byte[]... fields) {
+ return connection.invoke().just(Jedis::hpttl, PipelineBinaryCommands::hpttl, key, fields);
+ }
+
@Nullable
@Override
public Long hStrLen(byte[] key, byte[] field) {
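
For reference, a self-contained sketch of the seconds-to-TimeUnit conversion the TimeUnit overloads above perform (inline in the cluster variant, via Converters.secondsToTimeUnit in the non-cluster variant); the helper name here is the example's own.

import java.util.List;
import java.util.concurrent.TimeUnit;

class TtlConversionSketch {

	// Sentinel values (-1 = no expiration, -2 = no such field) go through the same conversion.
	static List<Long> secondsTo(TimeUnit targetUnit, List<Long> ttlsInSeconds) {
		return ttlsInSeconds.stream()
				.map(ttl -> ttl != null ? targetUnit.convert(ttl, TimeUnit.SECONDS) : null)
				.toList();
	}
}
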
diff --git a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceConnection.java b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceConnection.java
index fc8460d514..fe646b3a1b 100644
--- a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceConnection.java
+++ b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceConnection.java
@@ -1160,6 +1160,14 @@ static class TypeHints {
COMMAND_OUTPUT_TYPE_MAPPING.put(PFMERGE, IntegerOutput.class);
COMMAND_OUTPUT_TYPE_MAPPING.put(PFADD, IntegerOutput.class);
+ COMMAND_OUTPUT_TYPE_MAPPING.put(HEXPIRE, IntegerListOutput.class);
+ COMMAND_OUTPUT_TYPE_MAPPING.put(HEXPIREAT, IntegerListOutput.class);
+ COMMAND_OUTPUT_TYPE_MAPPING.put(HPEXPIRE, IntegerListOutput.class);
+ COMMAND_OUTPUT_TYPE_MAPPING.put(HPEXPIREAT, IntegerListOutput.class);
+ COMMAND_OUTPUT_TYPE_MAPPING.put(HPERSIST, IntegerListOutput.class);
+ COMMAND_OUTPUT_TYPE_MAPPING.put(HTTL, IntegerListOutput.class);
+ COMMAND_OUTPUT_TYPE_MAPPING.put(HPTTL, IntegerListOutput.class);
+
// DOUBLE
COMMAND_OUTPUT_TYPE_MAPPING.put(HINCRBYFLOAT, DoubleOutput.class);
COMMAND_OUTPUT_TYPE_MAPPING.put(INCRBYFLOAT, DoubleOutput.class);
diff --git a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java
index e4b53f4fb4..032d6230d6 100644
--- a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java
+++ b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java
@@ -15,17 +15,21 @@
*/
package org.springframework.data.redis.connection.lettuce;
+import io.lettuce.core.ExpireArgs;
import io.lettuce.core.KeyValue;
import io.lettuce.core.MapScanCursor;
import io.lettuce.core.ScanArgs;
import io.lettuce.core.api.async.RedisHashAsyncCommands;
+import io.lettuce.core.protocol.CommandArgs;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
+import java.util.concurrent.TimeUnit;
import org.springframework.dao.InvalidDataAccessApiUsageException;
+import org.springframework.data.redis.connection.Hash.FieldExpirationOptions;
import org.springframework.data.redis.connection.RedisHashCommands;
import org.springframework.data.redis.connection.convert.Converters;
import org.springframework.data.redis.core.Cursor;
@@ -35,10 +39,12 @@
import org.springframework.data.redis.core.ScanOptions;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
+import org.springframework.util.ObjectUtils;
/**
* @author Christoph Strobl
* @author Mark Paluch
+ * @author Tihomir Mateev
* @since 2.0
*/
class LettuceHashCommands implements RedisHashCommands {
@@ -208,6 +214,51 @@ public Cursor> hScan(byte[] key, ScanOptions options) {
return hScan(key, CursorId.initial(), options);
}
+ @Override
+ public List hExpire(byte[] key, long seconds, FieldExpirationOptions.Condition condition, byte[]... fields) {
+ return connection.invoke().fromMany(RedisHashAsyncCommands::hexpire, key, seconds, getExpireArgs(condition), fields)
+ .toList();
+ }
+
+ @Override
+ public List hpExpire(byte[] key, long millis, FieldExpirationOptions.Condition condition, byte[]... fields) {
+ return connection.invoke().fromMany(RedisHashAsyncCommands::hpexpire, key, millis, getExpireArgs(condition), fields)
+ .toList();
+ }
+
+ @Override
+ public List hExpireAt(byte[] key, long unixTime, FieldExpirationOptions.Condition condition, byte[]... fields) {
+ return connection.invoke()
+ .fromMany(RedisHashAsyncCommands::hexpireat, key, unixTime, getExpireArgs(condition), fields).toList();
+ }
+
+ @Override
+ public List hpExpireAt(byte[] key, long unixTimeInMillis, FieldExpirationOptions.Condition condition,
+ byte[]... fields) {
+ return connection.invoke()
+ .fromMany(RedisHashAsyncCommands::hpexpireat, key, unixTimeInMillis, getExpireArgs(condition), fields).toList();
+ }
+
+ @Override
+ public List hPersist(byte[] key, byte[]... fields) {
+ return connection.invoke().fromMany(RedisHashAsyncCommands::hpersist, key, fields).toList();
+ }
+
+ @Override
+ public List hTtl(byte[] key, byte[]... fields) {
+ return connection.invoke().fromMany(RedisHashAsyncCommands::httl, key, fields).toList();
+ }
+
+ @Override
+ public List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) {
+ return connection.invoke().fromMany(RedisHashAsyncCommands::httl, key, fields)
+ .toList(Converters.secondsToTimeUnit(timeUnit));
+ }
+
+ @Override
+ public List hpTtl(byte[] key, byte[]... fields) {
+ return connection.invoke().fromMany(RedisHashAsyncCommands::hpttl, key, fields).toList();
+ }
/**
* @param key
@@ -263,4 +314,19 @@ private static Entry toEntry(KeyValue value) {
return value.hasValue() ? Converters.entryOf(value.getKey(), value.getValue()) : null;
}
+ private ExpireArgs getExpireArgs(FieldExpirationOptions.Condition condition) {
+
+ return new ExpireArgs() {
+ @Override
+ public void build(CommandArgs args) {
+
+ if (ObjectUtils.nullSafeEquals(condition, FieldExpirationOptions.Condition.ALWAYS)) {
+ return;
+ }
+
+ args.add(condition.name());
+ }
+ };
+ }
+
}
diff --git a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommands.java b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommands.java
index b704321ef5..84dd2ca906 100644
--- a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommands.java
+++ b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommands.java
@@ -15,8 +15,10 @@
*/
package org.springframework.data.redis.connection.lettuce;
+import io.lettuce.core.ExpireArgs;
import io.lettuce.core.KeyValue;
import io.lettuce.core.ScanStream;
+import io.lettuce.core.protocol.CommandArgs;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
@@ -25,10 +27,11 @@
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
+import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.reactivestreams.Publisher;
-
+import org.springframework.data.redis.connection.Hash.FieldExpirationOptions;
import org.springframework.data.redis.connection.ReactiveHashCommands;
import org.springframework.data.redis.connection.ReactiveRedisConnection.BooleanResponse;
import org.springframework.data.redis.connection.ReactiveRedisConnection.CommandResponse;
@@ -38,6 +41,7 @@
import org.springframework.data.redis.connection.ReactiveRedisConnection.NumericResponse;
import org.springframework.data.redis.connection.convert.Converters;
import org.springframework.util.Assert;
+import org.springframework.util.ObjectUtils;
/**
* @author Christoph Strobl
@@ -264,6 +268,93 @@ public Flux> hStrLen(Publisher> applyExpiration(Publisher commands) {
+
+ return connection.execute(cmd -> Flux.from(commands).concatMap(command -> {
+
+ Assert.notNull(command.getKey(), "Key must not be null");
+ Assert.notNull(command.getFields(), "Fields must not be null");
+
+ ByteBuffer[] fields = command.getFields().toArray(ByteBuffer[]::new);
+
+ if (command.getExpiration().isPersistent()) {
+ return cmd.hpersist(command.getKey(), fields).map(value -> new NumericResponse<>(command, value));
+ }
+
+ ExpireArgs args = new ExpireArgs() {
+
+ @Override
+ public void build(CommandArgs args) {
+ super.build(args);
+ if (ObjectUtils.nullSafeEquals(command.getOptions(), FieldExpirationOptions.none())) {
+ return;
+ }
+
+ args.add(command.getOptions().getCondition().name());
+ }
+ };
+
+ if (command.getExpiration().isUnixTimestamp()) {
+
+ if (command.getExpiration().getTimeUnit().equals(TimeUnit.MILLISECONDS)) {
+ return cmd
+ .hpexpireat(command.getKey(), command.getExpiration().getExpirationTimeInMilliseconds(), args, fields)
+ .map(value -> new NumericResponse<>(command, value));
+ }
+ return cmd.hexpireat(command.getKey(), command.getExpiration().getExpirationTimeInSeconds(), args, fields)
+ .map(value -> new NumericResponse<>(command, value));
+ }
+
+ if (command.getExpiration().getTimeUnit().equals(TimeUnit.MILLISECONDS)) {
+ return cmd.hpexpire(command.getKey(), command.getExpiration().getExpirationTimeInMilliseconds(), args, fields)
+ .map(value -> new NumericResponse<>(command, value));
+ }
+
+ return cmd.hexpire(command.getKey(), command.getExpiration().getExpirationTimeInSeconds(), args, fields)
+ .map(value -> new NumericResponse<>(command, value));
+ }));
+ }
+
+ @Override
+ public Flux> hPersist(Publisher commands) {
+
+ return connection.execute(cmd -> Flux.from(commands).concatMap(command -> {
+
+ Assert.notNull(command.getKey(), "Key must not be null");
+ Assert.notNull(command.getFields(), "Fields must not be null");
+
+ return cmd.hpersist(command.getKey(), command.getFields().toArray(ByteBuffer[]::new))
+ .map(value -> new NumericResponse<>(command, value));
+ }));
+ }
+
+ @Override
+ public Flux> hTtl(Publisher commands) {
+
+ return connection.execute(cmd -> Flux.from(commands).concatMap(command -> {
+
+ Assert.notNull(command.getKey(), "Key must not be null");
+ Assert.notNull(command.getFields(), "Fields must not be null");
+
+ return cmd.httl(command.getKey(), command.getFields().toArray(ByteBuffer[]::new))
+ .map(value -> new NumericResponse<>(command, value));
+ }));
+ }
+
+ @Override
+ public Flux> hpTtl(Publisher commands) {
+
+ return connection.execute(cmd -> Flux.from(commands).concatMap(command -> {
+
+ Assert.notNull(command.getKey(), "Key must not be null");
+ Assert.notNull(command.getFields(), "Fields must not be null");
+
+ return cmd.hpttl(command.getKey(), command.getFields().toArray(ByteBuffer[]::new))
+ .map(value -> new NumericResponse<>(command, value));
+ }));
+ }
+
private static Map.Entry toEntry(KeyValue kv) {
return new Entry() {
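
A reactive usage sketch exercising the connection-level expiration plumbing above through ReactiveHashOperations (whose template-level implementation follows further down); the ReactiveStringRedisTemplate wiring and the key/field names are assumptions.

import java.time.Duration;
import java.util.List;

import reactor.core.publisher.Mono;

import org.springframework.data.redis.core.ReactiveStringRedisTemplate;

class ReactiveHashFieldExpireSketch {

	Mono<?> expireToken(ReactiveStringRedisTemplate template) {

		var hashOps = template.<String, String> opsForHash();

		// write the field, then apply a 5 minute TTL to it
		return hashOps.put("session:42", "token", "abc")
				.then(hashOps.expire("session:42", Duration.ofMinutes(5), List.of("token")));
	}
}
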
diff --git a/src/main/java/org/springframework/data/redis/core/BoundHashFieldExpirationOperations.java b/src/main/java/org/springframework/data/redis/core/BoundHashFieldExpirationOperations.java
new file mode 100644
index 0000000000..27779a88a8
--- /dev/null
+++ b/src/main/java/org/springframework/data/redis/core/BoundHashFieldExpirationOperations.java
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2025 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.springframework.data.redis.core;
+
+import java.time.Duration;
+import java.time.Instant;
+import java.util.concurrent.TimeUnit;
+
+import org.springframework.data.redis.connection.Hash;
+import org.springframework.data.redis.core.types.Expiration;
+import org.springframework.data.redis.core.types.Expirations;
+import org.springframework.lang.Nullable;
+
+/**
+ * Hash Field Expiration operations bound to a certain hash key and set of hash fields.
+ *
+ * @param type of the hash field names.
+ * @author Mark Paluch
+ * @since 3.5
+ */
+public interface BoundHashFieldExpirationOperations {
+
+ /**
+ * Apply {@link Expiration} to the hash without any additional constraints.
+ *
+ * @param expiration the expiration definition.
+ * @return changes to the hash fields. {@literal null} when used in pipeline / transaction.
+ */
+ default ExpireChanges expire(Expiration expiration) {
+ return expire(expiration, Hash.FieldExpirationOptions.none());
+ }
+
+ /**
+ * Apply {@link Expiration} to the hash fields given {@link Hash.FieldExpirationOptions expiration options}.
+ *
+ * @param expiration the expiration definition.
+ * @param options expiration options.
+ * @return changes to the hash fields. {@literal null} when used in pipeline / transaction.
+ */
+ ExpireChanges expire(Expiration expiration, Hash.FieldExpirationOptions options);
+
+ /**
+ * Set time to live for given {@code hashKey}.
+ *
+ * @param timeout the amount of time after which the key will be expired, must not be {@literal null}.
+ * @return changes to the hash fields. {@literal null} when used in pipeline / transaction.
+ * @throws IllegalArgumentException if the timeout is {@literal null}.
+ * @see Redis Documentation: HEXPIRE
+ * @since 3.5
+ */
+ @Nullable
+ ExpireChanges expire(Duration timeout);
+
+ /**
+ * Set the expiration for given {@code hashKey} as a {@literal date} timestamp.
+ *
+ * @param expireAt must not be {@literal null}.
+ * @return changes to the hash fields. {@literal null} when used in pipeline / transaction.
+ * @throws IllegalArgumentException if the instant is {@literal null} or too large to represent as a {@code Date}.
+ * @see Redis Documentation: HEXPIRE
+ * @since 3.5
+ */
+ @Nullable
+ ExpireChanges expireAt(Instant expireAt);
+
+ /**
+ * Remove the expiration from given {@code hashKey}.
+ *
+ * @return changes to the hash fields. {@literal null} when used in pipeline / transaction.
+ * @see Redis Documentation: HPERSIST
+ * @since 3.5
+ */
+ @Nullable
+ ExpireChanges persist();
+
+ /**
+ * Get the time to live for {@code hashKey} in seconds.
+ *
+ * @return the actual expirations in seconds for the hash fields. {@literal null} when used in pipeline / transaction.
+ * @see Redis Documentation: HTTL
+ * @since 3.5
+ */
+ @Nullable
+ Expirations getTimeToLive();
+
+ /**
+ * Get the time to live for {@code hashKey} and convert it to the given {@link TimeUnit}.
+ *
+ * @param timeUnit must not be {@literal null}.
+ * @return the actual expirations for the hash fields in the given time unit. {@literal null} when used in pipeline /
+ * transaction.
+ * @see Redis Documentation: HTTL
+ * @since 3.5
+ */
+ @Nullable
+ Expirations getTimeToLive(TimeUnit timeUnit);
+
+}
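
A sketch of the new bound API in use; the StringRedisTemplate wiring and the key/field names are illustrative, and var stands in for the parameterized ExpireChanges/Expirations return types.

import java.time.Duration;
import java.util.concurrent.TimeUnit;

import org.springframework.data.redis.core.BoundHashOperations;
import org.springframework.data.redis.core.StringRedisTemplate;

class BoundHashFieldExpirationSketch {

	void run(StringRedisTemplate template) {

		BoundHashOperations<String, String, String> hashOps = template.boundHashOps("session:42");
		hashOps.put("token", "abc");

		// bound to the "token" field only; expiration() without arguments operates on all fields
		var expiration = hashOps.expiration("token");

		var changes = expiration.expire(Duration.ofMinutes(30));
		var ttls = expiration.getTimeToLive(TimeUnit.SECONDS);
	}
}
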
diff --git a/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java b/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java
index f906462911..0d287e929f 100644
--- a/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java
+++ b/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java
@@ -15,6 +15,7 @@
*/
package org.springframework.data.redis.core;
+import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
@@ -29,6 +30,7 @@
* @author Christoph Strobl
* @author Ninad Divadkar
* @author Mark Paluch
+ * @author Tihomir Mateev
*/
public interface BoundHashOperations extends BoundKeyOperations {
@@ -90,7 +92,7 @@ public interface BoundHashOperations extends BoundKeyOperations {
Double increment(HK key, double delta);
/**
- * Return a random key (aka field) from the hash stored at the bound key.
+ * Return a random key from the hash stored at the bound key.
*
* @return {@literal null} if the hash does not exist or when used in pipeline / transaction.
* @since 2.6
@@ -110,10 +112,10 @@ public interface BoundHashOperations extends BoundKeyOperations {
Map.Entry randomEntry();
/**
- * Return a random keys (aka fields) from the hash stored at the bound key. If the provided {@code count} argument is
- * positive, return a list of distinct keys, capped either at {@code count} or the hash size. If {@code count} is
- * negative, the behavior changes and the command is allowed to return the same key multiple times. In this case, the
- * number of returned keys is the absolute value of the specified count.
+ * Return random keys from the hash stored at the bound key. If the provided {@code count} argument is positive,
+ * return a list of distinct keys, capped either at {@code count} or the hash size. If {@code count} is negative, the
+ * behavior changes and the command is allowed to return the same key multiple times. In this case, the number of
+ * returned keys is the absolute value of the specified count.
*
* @param count number of keys to return.
* @return {@literal null} if key does not exist or when used in pipeline / transaction.
@@ -213,8 +215,45 @@ public interface BoundHashOperations extends BoundKeyOperations {
*/
Cursor> scan(ScanOptions options);
+ /**
+ * Returns a bound operations object to perform operations on the hash field expiration for all hash fields at the
+ * bound {@code key}. Operations on the expiration object obtain keys at the time of invoking any expiration
+ * operation.
+ *
+ * @return the bound operations object to perform operations on the hash field expiration.
+ * @since 3.5
+ */
+ default BoundHashFieldExpirationOperations expiration() {
+ return new DefaultBoundHashFieldExpirationOperations<>(getOperations().opsForHash(), getKey(), this::keys);
+ }
+
+ /**
+ * Returns a bound operations object to perform operations on the hash field expiration for the given hash fields at
+ * the bound {@code key}.
+ *
+ * @param hashFields hash fields to operate on.
+ * @return the bound operations object to perform operations on the hash field expiration.
+ * @since 3.5
+ */
+ default BoundHashFieldExpirationOperations expiration(HK... hashFields) {
+ return expiration(Arrays.asList(hashFields));
+ }
+
+ /**
+ * Returns a bound operations object to perform operations on the hash field expiration for the given hash fields at
+ * the bound {@code key}.
+ *
+ * @param hashFields collection of hash fields to operate on.
+ * @return the bound operations object to perform operations on the hash field expiration.
+ * @since 3.5
+ */
+ default BoundHashFieldExpirationOperations expiration(Collection hashFields) {
+ return new DefaultBoundHashFieldExpirationOperations<>(getOperations().opsForHash(), getKey(), () -> hashFields);
+ }
+
/**
* @return never {@literal null}.
*/
RedisOperations getOperations();
+
}
diff --git a/src/main/java/org/springframework/data/redis/core/DefaultBoundHashFieldExpirationOperations.java b/src/main/java/org/springframework/data/redis/core/DefaultBoundHashFieldExpirationOperations.java
new file mode 100644
index 0000000000..8dbabe6dd5
--- /dev/null
+++ b/src/main/java/org/springframework/data/redis/core/DefaultBoundHashFieldExpirationOperations.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2025 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.springframework.data.redis.core;
+
+import java.time.Duration;
+import java.time.Instant;
+import java.util.Collection;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
+
+import org.springframework.data.redis.connection.Hash;
+import org.springframework.data.redis.core.types.Expiration;
+import org.springframework.data.redis.core.types.Expirations;
+import org.springframework.lang.Nullable;
+import org.springframework.util.Assert;
+
+/**
+ * Default implementation of {@link BoundHashFieldExpirationOperations}.
+ *
+ * @author Mark Paluch
+ * @since 3.5
+ */
+class DefaultBoundHashFieldExpirationOperations implements BoundHashFieldExpirationOperations {
+
+ private final HashOperations operations;
+ private final H key;
+ private final Supplier extends Collection> hashFields;
+
+ public DefaultBoundHashFieldExpirationOperations(HashOperations operations, H key,
+ Supplier extends Collection> hashFields) {
+
+ this.operations = operations;
+ this.key = key;
+ this.hashFields = hashFields;
+ }
+
+ @Override
+ public ExpireChanges expire(Expiration expiration, Hash.FieldExpirationOptions options) {
+ return operations.expire(key, expiration, options, getHashKeys());
+ }
+
+ @Nullable
+ @Override
+ public ExpireChanges expire(Duration timeout) {
+ return operations.expire(key, timeout, getHashKeys());
+ }
+
+ @Nullable
+ @Override
+ public ExpireChanges expireAt(Instant expireAt) {
+ return operations.expireAt(key, expireAt, getHashKeys());
+ }
+
+ @Nullable
+ @Override
+ public ExpireChanges persist() {
+ return operations.persist(key, getHashKeys());
+ }
+
+ @Nullable
+ @Override
+ public Expirations getTimeToLive() {
+ return operations.getTimeToLive(key, getHashKeys());
+ }
+
+ @Nullable
+ @Override
+ public Expirations getTimeToLive(TimeUnit timeUnit) {
+ return operations.getTimeToLive(key, timeUnit, getHashKeys());
+ }
+
+ private Collection getHashKeys() {
+
+ Collection hks = hashFields.get();
+
+ Assert.state(hks != null, "Hash keys must not be null");
+ return hks;
+ }
+
+}
diff --git a/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java b/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java
index 974e20e13f..804617616f 100644
--- a/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java
+++ b/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java
@@ -15,6 +15,8 @@
*/
package org.springframework.data.redis.core;
+import java.time.Duration;
+import java.time.Instant;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashMap;
@@ -22,9 +24,14 @@
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
+import java.util.concurrent.TimeUnit;
import org.springframework.core.convert.converter.Converter;
+import org.springframework.data.redis.connection.Hash.FieldExpirationOptions;
import org.springframework.data.redis.connection.convert.Converters;
+import org.springframework.data.redis.core.types.Expiration;
+import org.springframework.data.redis.core.types.Expirations;
+import org.springframework.data.redis.core.types.Expirations.Timeouts;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
@@ -34,6 +41,7 @@
* @author Costin Leau
* @author Christoph Strobl
* @author Ninad Divadkar
+ * @author Tihomir Mateev
*/
class DefaultHashOperations extends AbstractOperations implements HashOperations {
@@ -210,6 +218,86 @@ public Boolean putIfAbsent(K key, HK hashKey, HV value) {
return execute(connection -> connection.hSetNX(rawKey, rawHashKey, rawHashValue));
}
+ @Override
+ public ExpireChanges expire(K key, Duration duration, Collection hashKeys) {
+
+ List orderedKeys = List.copyOf(hashKeys);
+
+ byte[] rawKey = rawKey(key);
+ byte[][] rawHashKeys = rawHashKeys(orderedKeys.toArray());
+ boolean hasMillis = TimeoutUtils.hasMillis(duration);
+
+ List raw = execute(connection -> hasMillis
+ ? connection.hashCommands().hpExpire(rawKey, duration.toMillis(), rawHashKeys)
+ : connection.hashCommands().hExpire(rawKey, TimeoutUtils.toSeconds(duration), rawHashKeys));
+
+ return raw != null ? ExpireChanges.of(orderedKeys, raw) : null;
+ }
+
+ @Override
+ public ExpireChanges expireAt(K key, Instant instant, Collection hashKeys) {
+
+ List orderedKeys = List.copyOf(hashKeys);
+
+ byte[] rawKey = rawKey(key);
+ byte[][] rawHashKeys = rawHashKeys(orderedKeys.toArray());
+ long millis = instant.toEpochMilli();
+
+ List raw = execute(connection -> TimeoutUtils.containsSplitSecond(millis)
+ ? connection.hashCommands().hpExpireAt(rawKey, millis, rawHashKeys)
+ : connection.hashCommands().hExpireAt(rawKey, instant.getEpochSecond(), rawHashKeys));
+
+ return raw != null ? ExpireChanges.of(orderedKeys, raw) : null;
+ }
+
+ @Override
+ public ExpireChanges expire(K key, Expiration expiration, FieldExpirationOptions options, Collection hashKeys) {
+
+ List orderedKeys = List.copyOf(hashKeys);
+ byte[] rawKey = rawKey(key);
+ byte[][] rawHashKeys = rawHashKeys(orderedKeys.toArray());
+ List raw = execute(
+ connection -> connection.hashCommands().applyExpiration(rawKey, expiration, options, rawHashKeys));
+
+ return raw != null ? ExpireChanges.of(orderedKeys, raw) : null;
+ }
+
+ @Override
+ public ExpireChanges persist(K key, Collection hashKeys) {
+
+ List orderedKeys = List.copyOf(hashKeys);
+ byte[] rawKey = rawKey(key);
+ byte[][] rawHashKeys = rawHashKeys(orderedKeys.toArray());
+
+ List raw = execute(connection -> connection.hashCommands().hPersist(rawKey, rawHashKeys));
+
+ return raw != null ? ExpireChanges.of(orderedKeys, raw) : null;
+ }
+
+ @Override
+ public Expirations getTimeToLive(K key, TimeUnit timeUnit, Collection hashKeys) {
+
+ if (timeUnit.compareTo(TimeUnit.MILLISECONDS) < 0) {
+ throw new IllegalArgumentException("%s precision is not supported; must be >= MILLISECONDS".formatted(timeUnit));
+ }
+
+ List orderedKeys = List.copyOf(hashKeys);
+
+ byte[] rawKey = rawKey(key);
+ byte[][] rawHashKeys = rawHashKeys(orderedKeys.toArray());
+
+ List raw = execute(
+ connection -> TimeUnit.MILLISECONDS.equals(timeUnit) ? connection.hashCommands().hpTtl(rawKey, rawHashKeys)
+ : connection.hashCommands().hTtl(rawKey, timeUnit, rawHashKeys));
+
+ if (raw == null) {
+ return null;
+ }
+
+ Timeouts timeouts = new Timeouts(TimeUnit.MILLISECONDS.equals(timeUnit) ? timeUnit : TimeUnit.SECONDS, raw);
+ return Expirations.of(timeUnit, orderedKeys, timeouts);
+ }
+
@Override
public List values(K key) {
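
Finally, a sketch at the HashOperations level, backed by the default implementation shown above; the RedisTemplate wiring and the key/field names are assumptions of the example.

import java.time.Duration;
import java.util.List;
import java.util.concurrent.TimeUnit;

import org.springframework.data.redis.core.HashOperations;
import org.springframework.data.redis.core.RedisTemplate;

class HashOperationsExpireSketch {

	void run(RedisTemplate<String, Object> template) {

		HashOperations<String, String, Object> hashOps = template.opsForHash();
		hashOps.put("user:1", "lastLogin", "2025-01-01");

		var changes = hashOps.expire("user:1", Duration.ofSeconds(90), List.of("lastLogin"));

		// sub-millisecond units are rejected by getTimeToLive
		var ttls = hashOps.getTimeToLive("user:1", TimeUnit.SECONDS, List.of("lastLogin"));
	}
}
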
diff --git a/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java b/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java
index c3e004c25d..540b351778 100644
--- a/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java
+++ b/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java
@@ -19,16 +19,25 @@
import reactor.core.publisher.Mono;
import java.nio.ByteBuffer;
+import java.time.Duration;
+import java.time.Instant;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import org.reactivestreams.Publisher;
import org.springframework.dao.InvalidDataAccessApiUsageException;
+import org.springframework.data.redis.connection.Hash.FieldExpirationOptions;
import org.springframework.data.redis.connection.ReactiveHashCommands;
+import org.springframework.data.redis.connection.ReactiveHashCommands.ExpireCommand;
+import org.springframework.data.redis.connection.ReactiveRedisConnection.NumericResponse;
import org.springframework.data.redis.connection.convert.Converters;
+import org.springframework.data.redis.core.types.Expiration;
+import org.springframework.data.redis.core.types.Expirations;
+import org.springframework.data.redis.core.types.Expirations.Timeouts;
import org.springframework.data.redis.serializer.RedisSerializationContext;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
@@ -63,8 +72,7 @@ public Mono remove(H key, Object... hashKeys) {
Assert.noNullElements(hashKeys, "Hash keys must not contain null elements");
return createMono(hashCommands -> Flux.fromArray(hashKeys) //
- .map(hashKey -> (HK) hashKey)
- .map(this::rawHashKey) //
+ .map(hashKey -> (HK) hashKey).map(this::rawHashKey) //
.collectList() //
.flatMap(hks -> hashCommands.hDel(rawKey(key), hks)));
}
@@ -86,8 +94,8 @@ public Mono get(H key, Object hashKey) {
Assert.notNull(key, "Key must not be null");
Assert.notNull(hashKey, "Hash key must not be null");
- return createMono(hashCommands -> hashCommands.hGet(rawKey(key), rawHashKey((HK) hashKey))
- .map(this::readHashValue));
+ return createMono(
+ hashCommands -> hashCommands.hGet(rawKey(key), rawHashKey((HK) hashKey)).map(this::readHashValue));
}
@Override
@@ -109,8 +117,8 @@ public Mono increment(H key, HK hashKey, long delta) {
Assert.notNull(key, "Key must not be null");
Assert.notNull(hashKey, "Hash key must not be null");
- return template.doCreateMono(connection -> connection.numberCommands()
- .hIncrBy(rawKey(key), rawHashKey(hashKey), delta));
+ return template
+ .doCreateMono(connection -> connection.numberCommands().hIncrBy(rawKey(key), rawHashKey(hashKey), delta));
}
@Override
@@ -119,8 +127,8 @@ public Mono increment(H key, HK hashKey, double delta) {
Assert.notNull(key, "Key must not be null");
Assert.notNull(hashKey, "Hash key must not be null");
- return template.doCreateMono(connection -> connection.numberCommands()
- .hIncrBy(rawKey(key), rawHashKey(hashKey), delta));
+ return template
+ .doCreateMono(connection -> connection.numberCommands().hIncrBy(rawKey(key), rawHashKey(hashKey), delta));
}
@Override
@@ -137,8 +145,7 @@ public Mono> randomEntry(H key) {
Assert.notNull(key, "Key must not be null");
- return createMono(hashCommands -> hashCommands.hRandFieldWithValues(rawKey(key)))
- .map(this::deserializeHashEntry);
+ return createMono(hashCommands -> hashCommands.hRandFieldWithValues(rawKey(key))).map(this::deserializeHashEntry);
}
@Override
@@ -235,6 +242,81 @@ public Flux> scan(H key, ScanOptions options) {
.map(this::deserializeHashEntry));
}
+ @Override
+ public Mono> expire(H key, Duration timeout, Collection hashKeys) {
+ return expire(key, Expiration.from(timeout), FieldExpirationOptions.none(), hashKeys);
+ }
+
+ @Override
+ public Mono> expire(H key, Expiration expiration, FieldExpirationOptions options,
+ Collection hashKeys) {
+
+ List orderedKeys = List.copyOf(hashKeys);
+ ByteBuffer rawKey = rawKey(key);
+ List rawHashKeys = orderedKeys.stream().map(this::rawHashKey).toList();
+
+ Mono> raw = createFlux(connection -> {
+ return connection
+ .applyExpiration(Mono.just(ExpireCommand.expire(rawHashKeys, expiration).from(rawKey).withOptions(options)))
+ .map(NumericResponse::getOutput);
+ }).collectList();
+
+ return raw.map(values -> ExpireChanges.of(orderedKeys, values));
+ }
+
+ @Nullable
+ @Override
+ public Mono> expireAt(H key, Instant expireAt, Collection hashKeys) {
+
+ List orderedKeys = List.copyOf(hashKeys);
+ ByteBuffer rawKey = rawKey(key);
+ List